From 6eea9182db9323251cebc6f69ff6d7e5f839e4e0 Mon Sep 17 00:00:00 2001 From: xyephy Date: Fri, 27 Jun 2025 14:59:49 +0300 Subject: [PATCH 1/4] Add complete solo-ckpool integration with source code Includes all files needed for the Docker build: - Solo-ckpool source code with fractional difficulty modifications - Docker build configuration and startup scripts - Configuration files and documentation - Updated compose files and monitoring integration --- README.md | 9 +- SOLO_CKPOOL_MODIFICATIONS.md | 258 + custom-configs/solo-ckpool/README.md | 57 + custom-configs/solo-ckpool/ckpool.conf | 18 + docker-compose-config-a.yaml | 64 + docker-compose-config-c.yaml | 64 + prometheus/prometheus.yml | 10 +- solo-ckpool-source/.gitignore | 43 + solo-ckpool-source/AUTHORS | 7 + solo-ckpool-source/COPYING | 674 ++ solo-ckpool-source/ChangeLog | 4 + solo-ckpool-source/Makefile.am | 3 + solo-ckpool-source/NEWS | 0 solo-ckpool-source/README | 334 + solo-ckpool-source/README-SOLOMINING | 168 + solo-ckpool-source/autogen.sh | 2 + solo-ckpool-source/cknode.conf | 34 + solo-ckpool-source/ckpassthrough.conf | 15 + solo-ckpool-source/ckpool.conf | 42 + solo-ckpool-source/ckproxy.conf | 24 + solo-ckpool-source/ckredirector.conf | 23 + solo-ckpool-source/configure.ac | 98 + solo-ckpool-source/m4/.gitignore | 30 + solo-ckpool-source/src/Makefile.am | 42 + solo-ckpool-source/src/bitcoin.c | 424 + solo-ckpool-source/src/bitcoin.h | 27 + solo-ckpool-source/src/ckpmsg.c | 330 + solo-ckpool-source/src/ckpool.c | 1904 ++++ solo-ckpool-source/src/ckpool.h | 404 + solo-ckpool-source/src/connector.c | 1667 ++++ solo-ckpool-source/src/connector.h | 20 + solo-ckpool-source/src/generator.c | 3422 +++++++ solo-ckpool-source/src/generator.h | 30 + solo-ckpool-source/src/jansson-2.14/CHANGES | 986 ++ .../src/jansson-2.14/CMakeLists.txt | 662 ++ solo-ckpool-source/src/jansson-2.14/LICENSE | 19 + .../src/jansson-2.14/Makefile.am | 13 + .../src/jansson-2.14/README.rst | 81 + 
.../src/jansson-2.14/android/jansson_config.h | 43 + .../cmake/CheckFunctionKeywords.cmake | 15 + .../src/jansson-2.14/cmake/CodeCoverage.cmake | 163 + .../src/jansson-2.14/cmake/FindSphinx.cmake | 315 + .../jansson-2.14/cmake/janssonConfig.cmake.in | 4 + .../jansson-2.14/cmake/jansson_config.h.cmake | 74 + .../cmake/jansson_private_config.h.cmake | 53 + .../src/jansson-2.14/configure.ac | 168 + .../src/jansson-2.14/doc/Makefile.am | 20 + .../src/jansson-2.14/doc/README | 5 + .../src/jansson-2.14/doc/apiref.rst | 2064 ++++ .../src/jansson-2.14/doc/changes.rst | 5 + .../src/jansson-2.14/doc/conf.py | 217 + .../src/jansson-2.14/doc/conformance.rst | 119 + .../src/jansson-2.14/doc/ext/refcounting.py | 69 + .../src/jansson-2.14/doc/gettingstarted.rst | 264 + .../src/jansson-2.14/doc/github_commits.c | 180 + .../src/jansson-2.14/doc/index.rst | 53 + .../src/jansson-2.14/doc/threadsafety.rst | 82 + .../src/jansson-2.14/doc/tutorial.rst | 288 + .../src/jansson-2.14/doc/upgrading.rst | 76 + .../src/jansson-2.14/examples/README.rst | 4 + .../src/jansson-2.14/examples/simple_parse.c | 200 + .../src/jansson-2.14/jansson.pc.in | 10 + .../jansson-2.14/jansson_private_config.h.in | 160 + .../src/jansson-2.14/scripts/clang-format | 3 + .../jansson-2.14/scripts/clang-format-check | 27 + .../src/jansson-2.14/src/Makefile.am | 30 + .../src/jansson-2.14/src/dump.c | 492 + .../src/jansson-2.14/src/error.c | 59 + .../src/jansson-2.14/src/hashtable.c | 340 + .../src/jansson-2.14/src/hashtable.h | 186 + .../src/jansson-2.14/src/hashtable_seed.c | 277 + .../src/jansson-2.14/src/jansson.def | 83 + .../src/jansson-2.14/src/jansson.h | 422 + .../src/jansson-2.14/src/jansson_config.h.in | 51 + .../src/jansson-2.14/src/jansson_private.h | 118 + .../src/jansson-2.14/src/load.c | 1106 +++ .../src/jansson-2.14/src/lookup3.h | 382 + .../src/jansson-2.14/src/memory.c | 81 + .../src/jansson-2.14/src/pack_unpack.c | 937 ++ .../src/jansson-2.14/src/strbuffer.c | 103 + 
.../src/jansson-2.14/src/strbuffer.h | 35 + .../src/jansson-2.14/src/strconv.c | 132 + solo-ckpool-source/src/jansson-2.14/src/utf.c | 163 + solo-ckpool-source/src/jansson-2.14/src/utf.h | 29 + .../src/jansson-2.14/src/value.c | 1112 +++ .../src/jansson-2.14/src/version.c | 28 + .../src/jansson-2.14/test-driver | 153 + solo-ckpool-source/src/libckpool.c | 2258 +++++ solo-ckpool-source/src/libckpool.h | 616 ++ solo-ckpool-source/src/notifier.c | 63 + solo-ckpool-source/src/sha2.c | 236 + solo-ckpool-source/src/sha2.h | 69 + .../open_software_license.txt | 32 + .../src/sha256_code_release/sha256_avx1.asm | 588 ++ .../sha256_code_release/sha256_avx2_rorx2.asm | 828 ++ .../src/sha256_code_release/sha256_sse4.asm | 546 ++ solo-ckpool-source/src/stratifier.c | 8617 +++++++++++++++++ solo-ckpool-source/src/stratifier.h | 102 + solo-ckpool-source/src/uthash.h | 1144 +++ solo-ckpool-source/src/utlist.h | 757 ++ solo-ckpool.dockerfile | 65 + start-ckpool.sh | 10 + 102 files changed, 38941 insertions(+), 2 deletions(-) create mode 100644 SOLO_CKPOOL_MODIFICATIONS.md create mode 100644 custom-configs/solo-ckpool/README.md create mode 100644 custom-configs/solo-ckpool/ckpool.conf create mode 100644 solo-ckpool-source/.gitignore create mode 100644 solo-ckpool-source/AUTHORS create mode 100644 solo-ckpool-source/COPYING create mode 100644 solo-ckpool-source/ChangeLog create mode 100644 solo-ckpool-source/Makefile.am create mode 100644 solo-ckpool-source/NEWS create mode 100644 solo-ckpool-source/README create mode 100644 solo-ckpool-source/README-SOLOMINING create mode 100755 solo-ckpool-source/autogen.sh create mode 100644 solo-ckpool-source/cknode.conf create mode 100644 solo-ckpool-source/ckpassthrough.conf create mode 100644 solo-ckpool-source/ckpool.conf create mode 100644 solo-ckpool-source/ckproxy.conf create mode 100644 solo-ckpool-source/ckredirector.conf create mode 100644 solo-ckpool-source/configure.ac create mode 100644 solo-ckpool-source/m4/.gitignore create mode 
100644 solo-ckpool-source/src/Makefile.am create mode 100644 solo-ckpool-source/src/bitcoin.c create mode 100644 solo-ckpool-source/src/bitcoin.h create mode 100644 solo-ckpool-source/src/ckpmsg.c create mode 100644 solo-ckpool-source/src/ckpool.c create mode 100644 solo-ckpool-source/src/ckpool.h create mode 100644 solo-ckpool-source/src/connector.c create mode 100644 solo-ckpool-source/src/connector.h create mode 100644 solo-ckpool-source/src/generator.c create mode 100644 solo-ckpool-source/src/generator.h create mode 100644 solo-ckpool-source/src/jansson-2.14/CHANGES create mode 100644 solo-ckpool-source/src/jansson-2.14/CMakeLists.txt create mode 100644 solo-ckpool-source/src/jansson-2.14/LICENSE create mode 100644 solo-ckpool-source/src/jansson-2.14/Makefile.am create mode 100644 solo-ckpool-source/src/jansson-2.14/README.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/android/jansson_config.h create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake create mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake create mode 100644 solo-ckpool-source/src/jansson-2.14/configure.ac create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/Makefile.am create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/README create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/apiref.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/changes.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/conf.py create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/conformance.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py 
create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/github_commits.c create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/index.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/examples/README.rst create mode 100644 solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c create mode 100644 solo-ckpool-source/src/jansson-2.14/jansson.pc.in create mode 100644 solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in create mode 100755 solo-ckpool-source/src/jansson-2.14/scripts/clang-format create mode 100755 solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check create mode 100644 solo-ckpool-source/src/jansson-2.14/src/Makefile.am create mode 100644 solo-ckpool-source/src/jansson-2.14/src/dump.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/error.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson.def create mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in create mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson_private.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/load.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/lookup3.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/memory.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/strbuffer.c create mode 100644 
solo-ckpool-source/src/jansson-2.14/src/strbuffer.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/strconv.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/utf.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/utf.h create mode 100644 solo-ckpool-source/src/jansson-2.14/src/value.c create mode 100644 solo-ckpool-source/src/jansson-2.14/src/version.c create mode 100755 solo-ckpool-source/src/jansson-2.14/test-driver create mode 100644 solo-ckpool-source/src/libckpool.c create mode 100644 solo-ckpool-source/src/libckpool.h create mode 100644 solo-ckpool-source/src/notifier.c create mode 100644 solo-ckpool-source/src/sha2.c create mode 100644 solo-ckpool-source/src/sha2.h create mode 100644 solo-ckpool-source/src/sha256_code_release/open_software_license.txt create mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm create mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm create mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm create mode 100644 solo-ckpool-source/src/stratifier.c create mode 100644 solo-ckpool-source/src/stratifier.h create mode 100644 solo-ckpool-source/src/uthash.h create mode 100644 solo-ckpool-source/src/utlist.h create mode 100644 solo-ckpool.dockerfile create mode 100755 start-ckpool.sh diff --git a/README.md b/README.md index 0256804..20754ad 100644 --- a/README.md +++ b/README.md @@ -116,13 +116,20 @@ If you prefer to set up the benchmarking tool manually, follow these detailed st ``` 4. **Point miners to the following endpoints** - - For Stratum V1: + - For Stratum V1 (Public Pool): ```bash stratum+tcp://:3333 ``` 🚨 For SV1, you should use the address format `[bitcoin_address].[nickname]` as the username in your miner setup. E.g. 
to correctly run a CPU miner, you need to run it with: `./minerd -a sha256d -o stratum+tcp://127.0.0.1:3333 -q -D -P -u tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8.sv2-gitgab19` + - For Stratum V1 (Solo-CKPool): + ```bash + stratum+tcp://:3335 + ``` + 🚨 For Solo-CKPool, the username MUST be a valid Bitcoin address for the target network. + E.g. to correctly run a CPU miner, you need to run it with: `./minerd -a sha256d -o stratum+tcp://127.0.0.1:3335 -q -D -P -u tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8` + - For Stratum V2: ```bash stratum+tcp://:34255 diff --git a/SOLO_CKPOOL_MODIFICATIONS.md b/SOLO_CKPOOL_MODIFICATIONS.md new file mode 100644 index 0000000..3753623 --- /dev/null +++ b/SOLO_CKPOOL_MODIFICATIONS.md @@ -0,0 +1,258 @@ +# Solo-CKPool Integration for Benchmarking Tool + +This document describes the implementation of GitHub issue #23: integrating solo-ckpool as an SV1 pool option for optimization measurements and comparisons. + +## Overview + +Solo-ckpool has been successfully integrated into the benchmarking tool to provide additional SV1 pool comparison capabilities. This integration enables performance comparisons between: + +- **public-pool** (Node.js-based) vs **solo-ckpool** (C-based) +- **Pool-based solo mining** vs **True solo mining** +- **Different SV1 implementations** under identical conditions + +## Architecture + +### Network Layout + +``` +┌─────────────────┬─────────────────┬─────────────────┐ +│ Public Pool │ Solo-CKPool │ SV2 │ +│ (Node.js) │ (C) │ (Various) │ +├─────────────────┼─────────────────┼─────────────────┤ +│ 10.5.0.8:3332 │ 10.5.0.24:3333 │ 10.5.0.4:34254 │ +│ ↕ │ ↕ │ ↕ │ +│ Pool-Miner │ Solo-CKPool │ SV2 Proxies │ +│ Proxy │ Proxy │ │ +│ 10.5.0.19:3333 │ 10.5.0.25:3335 │ Multiple ports │ +│ ↕ │ ↕ │ ↕ │ +│ Miners │ Miners │ Miners │ +└─────────────────┴─────────────────┴─────────────────┘ + ↕ + Bitcoin Node (10.5.0.16) + via proxy (10.5.0.21) +``` + +### Integration Components + +1. 
**solo-ckpool.dockerfile**: Builds solo-ckpool from source +2. **solo-ckpool service**: Runs ckpool in BTCSOLO mode (-B flag) +3. **solo-ckpool-miner-proxy**: Monitors traffic and collects metrics +4. **Configuration files**: Pool settings and Bitcoin daemon connection +5. **Prometheus integration**: Metrics collection and monitoring + +## Configuration + +### Pool Configuration (`custom-configs/solo-ckpool/ckpool.conf`) + +```json +{ + "btcd": [{ + "url": "10.5.0.21:48330", + "auth": "username", + "pass": "password", + "notify": true + }], + "mindiff": 0.1, + "startdiff": 0.5, + "maxdiff": 0.0, + "serverurl": ["0.0.0.0:3333"], + "logdir": "/var/log/ckpool", + "sockdir": "/var/log/ckpool", + "loginterval": 60, + "update_interval": 30, + "zmqblock": "tcp://10.5.0.16:28332" +} +``` + +### Network Endpoints + +- **Solo-CKPool Stratum**: `stratum+tcp://:3335` +- **Public Pool Stratum**: `stratum+tcp://:3333` +- **SV2 Stratum**: `stratum+tcp://:34255` + +### Miner Setup + +```bash +# Solo-CKPool (requires valid Bitcoin address as username) +./minerd -a sha256d -o stratum+tcp://127.0.0.1:3335 \ + -u tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8.solo-ckpool -q -D -P + +# Public Pool (standard username format) +./minerd -a sha256d -o stratum+tcp://127.0.0.1:3333 \ + -u tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8.public-pool -q -D -P +``` + +## Key Features + +### Solo Mining Capabilities + +- **True Solo Mining**: Each miner gets individual coinbase transactions +- **Address Validation**: Usernames must be valid Bitcoin addresses +- **Direct Block Rewards**: 100% of block rewards go to solving miner +- **No Pool Accounting**: Simplified architecture without share tracking + +### Performance Characteristics + +- **C Implementation**: Optimized low-level performance +- **Memory Efficient**: Minimal RAM usage compared to Node.js pools +- **High Concurrency**: Handles large numbers of connections efficiently +- **Hardware Acceleration**: Supports optimized SHA256 (AVX2/SSE4) + +### 
Monitoring Integration + +All existing SV1 metrics are collected: + +- **Share Metrics**: Submitted, valid, stale shares and acceptance rates +- **Latency Metrics**: Job delivery times and block propagation +- **Bandwidth Metrics**: Farm and pool level network usage +- **Resource Metrics**: CPU, memory usage via cadvisor +- **Block Metrics**: Template values and blocks found + +## Enhanced Features + +### Fractional Difficulty Support ✅ + +**NEW**: Solo-ckpool now supports fractional difficulty values (including values < 1.0). + +**Enhanced Capabilities**: +- ✅ Support for fractional difficulty values (e.g., 0.1, 0.5, 2.5) +- ✅ Backward compatibility with integer configurations +- ✅ Suitable for low-hashrate testing scenarios +- ✅ Improved benchmarking accuracy for performance comparisons + +**Configuration Examples**: +```json +{ + "mindiff": 0.1, // Minimum difficulty of 0.1 (fractional) + "startdiff": 0.5, // Starting difficulty of 0.5 (fractional) + "maxdiff": 100.0 // Maximum difficulty of 100.0 +} +``` + +**Backward Compatibility**: +- Integer values still work: `"mindiff": 1, "startdiff": 42` +- Mixed configurations supported: `"mindiff": 0.5, "startdiff": 10` + +## Limitations and Considerations + +### Solo Mining Requirements + +- **Valid Addresses**: Miner usernames must be valid Bitcoin addresses for the target network +- **Network Sync**: Requires fully synced Bitcoin node for optimal performance +- **Block Notifications**: Needs ZMQ or blocknotify for real-time updates + +## Usage Instructions + +### Starting the Benchmarking Tool + +```bash +# Configuration A (with local JDC) +docker compose -f docker-compose-config-a.yaml up -d + +# Configuration C (pool-managed) +docker compose -f docker-compose-config-c.yaml up -d +``` + +### Connecting Miners + +Connect miners to different endpoints for comparison: + +```bash +# Terminal 1: Solo-CKPool +./minerd -a sha256d -o stratum+tcp://127.0.0.1:3335 \ + -u .solo-ckpool -q -D -P + +# Terminal 2: Public Pool 
+./minerd -a sha256d -o stratum+tcp://127.0.0.1:3333 \ + -u .public-pool -q -D -P + +# Terminal 3: SV2 (via translator) +./minerd -a sha256d -o stratum+tcp://127.0.0.1:34255 -q -D -P +``` + +### Monitoring Results + +Access Grafana dashboard: `http://localhost:3000/d/64nrElFmk/sri-benchmarking-tool` + +The dashboard will show comparative metrics between: +- Public Pool vs Solo-CKPool performance +- SV1 vs SV2 protocol efficiency +- Resource usage and network characteristics + +## Expected Performance Differences + +### Solo-CKPool Advantages + +1. **Lower Memory Usage**: C implementation more memory efficient +2. **Higher Connection Capacity**: Better handling of concurrent miners +3. **Faster Processing**: Optimized protocol handling and validation +4. **Authentic Solo Mining**: True individual coinbase generation +5. **✅ NEW: Fractional Difficulty**: Support for difficulty < 1.0 with enhanced precision + +### Public Pool Advantages + +1. **Feature Rich**: More configuration options and pool features +2. **Detailed Logging**: More comprehensive debugging information +3. **Flexibility**: Easier to modify and customize +4. **Web Interface**: Built-in monitoring and management tools + +## Troubleshooting + +### Common Issues + +1. **Address Validation Errors** + - Ensure usernames are valid Bitcoin addresses for target network + - Use correct address format (testnet vs mainnet) + +2. **Connection Failures** + - Check that solo-ckpool service is running: `docker logs solo-ckpool` + - Verify Bitcoin node connectivity: `docker logs sv1-node-pool-side` + +3. **Metrics Not Appearing** + - Confirm proxy is running: `docker logs solo-ckpool-miner-proxy` + - Check Prometheus targets: `http://localhost:9090/targets` + +### Log Analysis + +```bash +# Solo-CKPool logs +docker logs solo-ckpool + +# Proxy metrics logs +docker logs solo-ckpool-miner-proxy + +# Bitcoin node logs +docker logs sv1-node-pool-side +``` + +## Future Enhancements + +### Planned Improvements + +1. 
**Fractional Difficulty Support**: ✅ Implemented — native fractional difficulty is now built in (see "Enhanced Features" above), so a wrapper layer for difficulty scaling is no longer required +2. **Enhanced Metrics**: Solo-mining specific performance indicators +3. **Configuration Profiles**: Different solo mining scenarios +4. **Multi-Pool Comparison**: Side-by-side analysis of multiple pools + +### Integration Opportunities + +- **ASIC Testing**: Real hardware performance comparisons +- **Mainnet Benchmarks**: Production environment analysis +- **Scalability Tests**: Large-scale miner farm simulations +- **Network Analysis**: Latency impact studies across different pools + +## Contributing + +To modify or extend the solo-ckpool integration: + +1. **Configuration Changes**: Edit `custom-configs/solo-ckpool/ckpool.conf` +2. **Docker Modifications**: Update `solo-ckpool.dockerfile` +3. **Service Changes**: Modify docker-compose files +4. **Metrics Addition**: Extend Prometheus configuration + +## References + +- [Solo-CKPool Repository](https://bitbucket.org/ckolivas/ckpool-solo) +- [Benchmarking Tool Documentation](./docs/benchmarking-tool-overview.md) +- [SV1 Protocol Specification](https://braiins.com/stratum-v1/docs) +- [GitHub Issue #23](https://github.com/stratum-mining/benchmarking-tool/issues/23) \ No newline at end of file diff --git a/custom-configs/solo-ckpool/README.md b/custom-configs/solo-ckpool/README.md new file mode 100644 index 0000000..25d53b0 --- /dev/null +++ b/custom-configs/solo-ckpool/README.md @@ -0,0 +1,57 @@ +# Solo-CKPool Configuration + +This directory contains configuration files for integrating solo-ckpool into the benchmarking tool. 
+ +## Files + +- `ckpool.conf` - Main solo-ckpool configuration file +- `README.md` - This documentation file + +## Configuration Parameters + +### Bitcoin Daemon Connection +- **URL**: `10.5.0.21:48330` - Connects through sv1-node-pool-proxy for metrics collection +- **Auth**: Basic authentication credentials (username/password) +- **Notify**: Enables block notifications from Bitcoin daemon + +### Difficulty Settings +- **mindiff**: 0.1 - Minimum difficulty (fractional values supported by this modified solo-ckpool) +- **startdiff**: 0.5 - Starting difficulty for new miners +- **maxdiff**: 0 - Maximum difficulty (0 = unlimited) + +### Network Settings +- **serverurl**: `0.0.0.0:3333` - Stratum server binding +- **zmqblock**: `tcp://10.5.0.16:28332` - ZMQ block notification endpoint + +### Logging +- **logdir**: `/var/log/ckpool` - Log file directory +- **sockdir**: `/var/log/ckpool` - Unix socket directory +- **loginterval**: 60 - Log interval in seconds + +## Usage Notes + +1. **Solo Mining Mode**: Pool runs with `-B` flag for BTCSOLO mode +2. **Address Validation**: Miner usernames must be valid Bitcoin addresses +3. **Fractional Difficulty**: Supported - difficulty values below 1 (e.g. 0.1) are accepted by this modified build +4. **Network Integration**: Uses existing Bitcoin node through proxy for monitoring + +## Testing + +Connect miners using: +```bash +# For testnet +./minerd -a sha256d -o stratum+tcp://:3335 -u .solo-ckpool -q -D -P + +# Example with valid testnet address +./minerd -a sha256d -o stratum+tcp://127.0.0.1:3335 -u tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8.solo-ckpool -q -D -P +``` + +## Metrics Integration + +Solo-ckpool integrates with the existing SV1 metrics collection system: +- Share submissions, valid/stale shares +- Block template values and propagation times +- Network bandwidth usage (farm and pool level) +- Container resource usage (CPU, memory) + +All metrics are collected through the `solo-ckpool-miner-proxy` component. 
\ No newline at end of file diff --git a/custom-configs/solo-ckpool/ckpool.conf b/custom-configs/solo-ckpool/ckpool.conf new file mode 100644 index 0000000..76bc53c --- /dev/null +++ b/custom-configs/solo-ckpool/ckpool.conf @@ -0,0 +1,18 @@ +{ + "btcd": [{ + "url": "10.5.0.21:48330", + "auth": "username", + "pass": "password", + "notify": true + }], + "mindiff": 0.1, + "startdiff": 0.5, + "maxdiff": 0.0, + "btcaddress": "tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8", + "serverurl": ["0.0.0.0:3333"], + "logdir": "/var/log/ckpool", + "sockdir": "/var/log/ckpool", + "loginterval": 60, + "update_interval": 30, + "zmqblock": "tcp://10.5.0.16:28332" +} \ No newline at end of file diff --git a/docker-compose-config-a.yaml b/docker-compose-config-a.yaml index ad134a7..8d16f9d 100644 --- a/docker-compose-config-a.yaml +++ b/docker-compose-config-a.yaml @@ -58,6 +58,13 @@ services: image: pools-latency-calculator-builder-image command: echo "pools-latency-calculator build completed" + solo-ckpool-builder: + build: + dockerfile: ./solo-ckpool.dockerfile + container_name: solo-ckpool-builder + image: solo-ckpool-builder-image + command: echo "solo-ckpool build completed" + template-provider-pool-side: labels: logging: "config-a" @@ -408,6 +415,63 @@ services: sv2-net: ipv4_address: 10.5.0.21 + solo-ckpool: + image: solo-ckpool-builder-image + container_name: solo-ckpool + labels: + logging: "config-a" + command: + [ + "/bin/sh", + "-c", + "/usr/local/bin/start-ckpool.sh", + ] + ports: + - "3336:3333" + environment: + - NETWORK=${NETWORK} + volumes: + - "./custom-configs/solo-ckpool/ckpool.conf:/etc/ckpool/ckpool.conf:ro" + restart: unless-stopped + depends_on: + - solo-ckpool-builder + - sv1-node-pool-proxy + networks: + sv2-net: + ipv4_address: 10.5.0.24 + cap_add: + - NET_ADMIN + + solo-ckpool-miner-proxy: + image: sv1-custom-proxy-builder-image + labels: + logging: "config-a" + command: + [ + "/bin/sh", + "-c", + "./monitor_and_apply_latency.sh 10.5.0.24 2 & exec 
./sv1-custom-proxy", + ] + ports: + - "3335:3335" + - "2346:2346" + environment: + - SERVER=10.5.0.24:3333 + - CLIENT=0.0.0.0:3335 + - PROM_ADDRESS=10.5.0.25:2346 + - PROXY_TYPE=pool-miner + - RUST_LOG=${LOG_LEVEL} + container_name: solo-ckpool-miner-proxy + depends_on: + - sv1-custom-proxy-builder + - solo-ckpool + restart: unless-stopped + networks: + sv2-net: + ipv4_address: 10.5.0.25 + cap_add: + - NET_ADMIN + monitor-traffic-tcpdump: image: inzania/network-traffic-metrics:latest network_mode: host diff --git a/docker-compose-config-c.yaml b/docker-compose-config-c.yaml index 059b113..84f998c 100644 --- a/docker-compose-config-c.yaml +++ b/docker-compose-config-c.yaml @@ -57,6 +57,13 @@ services: image: pools-latency-calculator-builder-image command: echo "pools-latency-calculator build completed" + solo-ckpool-builder: + build: + dockerfile: ./solo-ckpool.dockerfile + container_name: solo-ckpool-builder + image: solo-ckpool-builder-image + command: echo "solo-ckpool build completed" + template-provider-pool-side: labels: logging: "config-c" @@ -333,6 +340,63 @@ services: sv2-net: ipv4_address: 10.5.0.21 + solo-ckpool: + image: solo-ckpool-builder-image + container_name: solo-ckpool + labels: + logging: "config-c" + command: + [ + "/bin/sh", + "-c", + "/usr/local/bin/start-ckpool.sh", + ] + ports: + - "3336:3333" + environment: + - NETWORK=${NETWORK} + volumes: + - "./custom-configs/solo-ckpool/ckpool.conf:/etc/ckpool/ckpool.conf:ro" + restart: unless-stopped + depends_on: + - solo-ckpool-builder + - sv1-node-pool-proxy + networks: + sv2-net: + ipv4_address: 10.5.0.24 + cap_add: + - NET_ADMIN + + solo-ckpool-miner-proxy: + image: sv1-custom-proxy-builder-image + labels: + logging: "config-c" + command: + [ + "/bin/sh", + "-c", + "./monitor_and_apply_latency.sh 10.5.0.24 2 & exec ./sv1-custom-proxy", + ] + ports: + - "3335:3335" + - "2346:2346" + environment: + - SERVER=10.5.0.24:3333 + - CLIENT=0.0.0.0:3335 + - PROM_ADDRESS=10.5.0.25:2346 + - 
PROXY_TYPE=pool-miner + - RUST_LOG=${LOG_LEVEL} + container_name: solo-ckpool-miner-proxy + depends_on: + - sv1-custom-proxy-builder + - solo-ckpool + restart: unless-stopped + networks: + sv2-net: + ipv4_address: 10.5.0.25 + cap_add: + - NET_ADMIN + monitor-traffic-tcpdump: image: inzania/network-traffic-metrics:latest network_mode: host diff --git a/prometheus/prometheus.yml b/prometheus/prometheus.yml index a0375b8..1a0deff 100644 --- a/prometheus/prometheus.yml +++ b/prometheus/prometheus.yml @@ -109,4 +109,12 @@ scrape_configs: scrape_interval: 5s static_configs: - - targets: ['sv2-translator-miner-proxy:5676'] # The Network Traffic Metrics IP/port \ No newline at end of file + - targets: ['sv2-translator-miner-proxy:5676'] # The Network Traffic Metrics IP/port + + - job_name: 'solo-ckpool-miner-proxy' + + # Override the global default and scrape targets from this job every 5 seconds. + scrape_interval: 5s + + static_configs: + - targets: ['solo-ckpool-miner-proxy:2346'] # Solo-ckpool proxy metrics \ No newline at end of file diff --git a/solo-ckpool-source/.gitignore b/solo-ckpool-source/.gitignore new file mode 100644 index 0000000..e53c249 --- /dev/null +++ b/solo-ckpool-source/.gitignore @@ -0,0 +1,43 @@ +*.o +*.bin +*.la +*.lo + +autom4te.cache +.deps + +Makefile +Makefile.in +INSTALL +aclocal.m4 +configure +depcomp +missing +install-sh +stamp-h1 +compile +config.log +config.status +config.guess +config.sub + +*~ + +ext_deps +config.h.in +config.h + +mkinstalldirs + +*.swp +src/ckpool +src/ckpmsg +ltmain.sh + +*.m4 + +.libs/ + +libtool + + diff --git a/solo-ckpool-source/AUTHORS b/solo-ckpool-source/AUTHORS new file mode 100644 index 0000000..4944848 --- /dev/null +++ b/solo-ckpool-source/AUTHORS @@ -0,0 +1,7 @@ +Con Kolivas +Core project lead, maintainer, author of ckpool and libckpool. +14BMjogz69qe8hk9thyzbmR5pg34mVKB1e + +Andrew Smith +Maintainer and author of ckdb. 
+1Jjk2LmktEQKnv8r2cZ9MvLiZwZ9gxabKm diff --git a/solo-ckpool-source/COPYING b/solo-ckpool-source/COPYING new file mode 100644 index 0000000..94a9ed0 --- /dev/null +++ b/solo-ckpool-source/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. 
You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. 
+ + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year>  <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program>  Copyright (C) <year>  <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<https://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<https://www.gnu.org/licenses/why-not-lgpl.html>. 
diff --git a/solo-ckpool-source/ChangeLog b/solo-ckpool-source/ChangeLog new file mode 100644 index 0000000..23ff63f --- /dev/null +++ b/solo-ckpool-source/ChangeLog @@ -0,0 +1,4 @@ +See git repository ('git log') for full changelog. + +Git repository can be found at: +https://bitbucket.org/ckolivas/ckpool-solo diff --git a/solo-ckpool-source/Makefile.am b/solo-ckpool-source/Makefile.am new file mode 100644 index 0000000..126dcda --- /dev/null +++ b/solo-ckpool-source/Makefile.am @@ -0,0 +1,3 @@ +ACLOCAL_AMFLAGS = -I m4 +SUBDIRS = src +EXTRA_DIST = ckpool.conf ckproxy.conf README README-SOLOMINING diff --git a/solo-ckpool-source/NEWS b/solo-ckpool-source/NEWS new file mode 100644 index 0000000..e69de29 diff --git a/solo-ckpool-source/README b/solo-ckpool-source/README new file mode 100644 index 0000000..154361e --- /dev/null +++ b/solo-ckpool-source/README @@ -0,0 +1,334 @@ +CKPOOL + CKPROXY + libckpool by Con Kolivas + +Ultra low overhead massively scalable multi-process, multi-threaded modular +bitcoin mining pool, proxy, passthrough, and library in c for Linux. + +CKPOOL is code provided free of charge under the GPLv3 license but its development +is mostly paid for by commissioned funding, and the pool by default contributes +0.5% of solved blocks in pool mode to the development team. Please consider leaving +this contribution in the code if you are running it on a pool or contributing to the +authors listed in AUTHORS if you use this code to aid funding further development. + +--- +LICENSE: + +GNU Public license V3. See included COPYING for details. + + +--- +DESIGN: + +Architecture: + +- Low level hand coded architecture relying on minimal outside libraries beyond +basic glibc functions for maximum flexibility and minimal overhead that can be +built and deployed on any Linux installation. + +- Multiprocess+multithreaded design to scale to massive deployments and +capitalise on modern multicore/multithread CPU designs. + +- Minimal memory overhead. 
+ +- Utilises ultra reliable unix sockets for communication with dependent +processes. + +- Modular code design to streamline further development. + +- Standalone library code that can be utilised independently of ckpool. + +- Same code can be deployed in many different modes designed to talk to each +other on the same machine, local lan or remote internet locations. + + +--- +Modes of deployment: + +- Simple pool. + +- Simple pool with per-user solo mining. + +- Simple proxy without the limitations of hashrate inherent in other proxy +solutions when talking to ckpool. + +- Passthrough node(s) that combine connections to a single socket which can +be used to scale to millions of clients and allow the main pool to be isolated +from direct communication with clients. + +- Library for use by other software. + + +--- +Features: + +- Bitcoind communication to unmodified bitcoind with multiple failover to local +or remote locations. + +- Local pool instance worker limited only by operating system resources and +can be made virtually limitless through use of multiple downstream passthrough +nodes. + +- Proxy and passthrough modes can set up multiple failover upstream pools. + +- Optional share logging. + +- Virtually seamless restarts for upgrades through socket handover from exiting +instances to new starting instance. + +- Configurable custom coinbase signature. + +- Configurable instant starting and minimum difficulty. + +- Rapid vardiff adjustment with stable unlimited maximum difficulty handling. + +- New work generation on block changes incorporate full bitcoind transaction +set without delay or requiring to send transactionless work to miners thereby +providing the best bitcoin network support and rewarding miners with the most +transaction fees. + +- Event driven communication based on communication readiness preventing +slow communicating clients from delaying low latency ones. + +- Stratum messaging system to running clients. 
+ +- Accurate pool and per client statistics. + +- Multiple named instances can be run concurrently on the same machine. + + +--- +BUILDING: + +Building ckpool requires no dependencies outside of the basic build tools and +yasm on any linux installation. Recommended zmq notification support (ckpool +only) requires the zmq devel library installed. + + +Building with zmq (preferred build but not required for ckproxy): + +sudo apt-get install build-essential yasm libzmq3-dev + +./configure + +make + + +Basic build: + +sudo apt-get install build-essential yasm + +./configure + +make + + +Building from git also requires autoconf, automake, and pkgconf: + +sudo apt-get install build-essential yasm autoconf automake libtool libzmq3-dev pkgconf + +./autogen.sh + +./configure + +make + + +Binaries will be built in the src/ subdirectory. Binaries generated will be: + +ckpool - The main pool back end + +ckproxy - A link to ckpool that automatically starts it in proxy mode + +ckpmsg - An application for passing messages in libckpool format to ckpool + +notifier - An application designed to be run with bitcoind's -blocknotify to + notify ckpool of block changes. + + +Installation is NOT required and ckpool can be run directly from the directory +it's built in but it can be installed with: +sudo make install + + +--- +RUNNING: + +ckpool supports the following options: + +-B | --btcsolo + +-c CONFIG | --config CONFIG + +-g GROUP | --group GROUP + +-H | --handover + +-h | --help + +-k | --killold + +-L | --log-shares + +-l LOGLEVEL | --loglevel LOGLEVEL + +-N | --node + +-n NAME | --name NAME + +-P | --passthrough + +-p | --proxy + +-R | --redirector + +-s SOCKDIR | --sockdir SOCKDIR + +-u | --userproxy + + +-B will start ckpool in BTCSOLO mode, which is designed for solo mining. All +usernames connected must be valid bitcoin addresses, and 100% of the block +reward will go to the user solving the block, minus any donation set. 
+ +-c tells ckpool to override its default configuration filename and +load the specified one. If -c is not specified, ckpool looks for ckpool.conf, +in proxy mode it looks for ckproxy.conf, in passthrough mode for +ckpassthrough.conf and in redirector mode for ckredirector.conf + +-g will start ckpool as the group ID specified. + +-H will make ckpool attempt to receive a handover from a running instance of +ckpool with the same name, taking its client listening socket and shutting it +down. + +-h displays the above help + +-k will make ckpool shut down an existing instance of ckpool with the same name, +killing it if need be. Otherwise ckpool will refuse to start if an instance of +the same name is already running. + +-L will log per share information in the logs directory divided by block height +and then workbase. + +-n will change the ckpool process name to that specified, allowing +multiple different named instances to be running. By default the variant +names are used: ckpool, ckproxy, ckpassthrough, ckredirector, cknode. + +-P will start ckpool in passthrough proxy mode where it collates all incoming +connections and streams all information on a single connection to an upstream +pool specified in ckproxy.conf . Downstream users all retain their individual +presence on the master pool. Standalone mode is implied. + +-p will start ckpool in proxy mode where it appears to be a local pool handling +clients as separate entities while presenting shares as a single user to the +upstream pool specified. Note that the upstream pool needs to be a ckpool for +it to scale to large hashrates. Standalone mode is Optional. + +-R will start ckpool in a variant of passthrough mode. It is designed to be a +front end to filter out users that never contribute any shares. Once an +accepted share from the upstream pool is detected, it will issue a redirect to +one of the redirecturl entries in the configuration file.
It will cycle over +entries if multiple exist, but try to keep all clients from the same IP +redirecting to the same pool. + +-s tells ckpool which directory to place its own communication +sockets (/tmp by default) + +-u Userproxy mode will start ckpool in proxy mode as per the -p option above, +but in addition it will accept username/passwords from the stratum connects +and try to open additional connections with those credentials to the upstream +pool specified in the configuration file and then reconnect miners to mine with +their chosen username/password to the upstream pool. + + +ckpmsg and notifier support the -n, -p and -s options + +--- +CONFIGURATION + +At least one bitcoind is mandatory in ckpool mode with the minimum requirements +of server, rpcuser and rpcpassword set. + +Ckpool takes a json encoded configuration file in ckpool.conf by default or +ckproxy.conf in proxy or passthrough mode unless specified with -c. Sample +configurations for ckpool and ckproxy are included with the source. Entries +after the valid json are ignored and the space there can be used for comments. +The options recognised are as follows: + + +"btcd" : This is an array of bitcoind(s) with the options url, auth and pass +which match the configured bitcoind. The optional boolean field notify tells +ckpool this btcd is using the notifier and does not need to be polled for block +changes. If no btcd is specified, ckpool will look for one on localhost:8332 +with the username "user" and password "pass". + +"proxy" : This is an array in the same format as btcd above but is used in +proxy and passthrough mode to set the upstream pool and is mandatory. + +"btcaddress" : This is the bitcoin address to try to generate blocks to. It is +ignored in BTCSOLO mode. + +"btcsig" : This is an optional signature to put into the coinbase of mined +blocks. + +"blockpoll" : This is the frequency in milliseconds for how often to check for +new network blocks and is 100 by default. 
It is intended to be a backup only +for when the notifier is not set up and only polls if the "notify" field is +not set on a btcd. + +"donation" : Optional percentage donation of block reward that goes to the +developer of ckpool to assist in further development and maintenance of the +code. Takes a floating point value and defaults to zero if not set. + +"nodeserver" : This takes the same format as the serverurl array and specifies +additional IPs/ports to bind to that will accept incoming requests for mining +node communications. It is recommended to selectively isolate this address +to minimise unnecessary communications with unauthorised nodes. + +"nonce1length" : This is optional allowing the extranonce1 length to be chosen +from 2 to 8. Default 4 + +"nonce2length" : This is optional allowing the extranonce2 length to be chosen +from 2 to 8. Default 8 + +"update_interval" : This is the frequency that stratum updates are sent out to +miners and is set to 30 seconds by default to help perpetuate transactions for +the health of the bitcoin network. + +"version_mask" : This is a mask of which bits in the version number it is valid +for a client to alter and is expressed as an hex string. Eg "00fff000" +Default is "1fffe000". + +"serverurl" : This is the IP(s) to try to bind ckpool uniquely to, otherwise it +will attempt to bind to all interfaces in port 3333 by default in pool mode +and 3334 in proxy mode. Multiple entries can be specified as an array by +either IP or resolvable domain name but the executable must be able to bind to +all of them and ports up to 1024 usually require privileged access. + +"redirecturl" : This is an array of URLs that ckpool will redirect active +miners to in redirector mode. They must be valid resolvable URLs+ports. + +"mindiff" : Minimum diff that vardiff will allow miners to drop to. Default 1 + +"startdiff" : Starting diff that new clients are given. 
Default 42 + +"maxdiff" : Optional maximum diff that vardiff will clamp to where zero is no +maximum. + +"logdir" : Which directory to store pool and client logs. Default "logs" + +"maxclients" : Optional upper limit on the number of clients ckpool will +accept before rejecting further clients. + +"zmqblock" : Optional interface to use for zmq blockhash notification - ckpool +only. Requires use of matched bitcoind -zmqpubhashblock option. +Default: tcp://127.0.0.1:28332 diff --git a/solo-ckpool-source/README-SOLOMINING b/solo-ckpool-source/README-SOLOMINING new file mode 100644 index 0000000..3022e89 --- /dev/null +++ b/solo-ckpool-source/README-SOLOMINING @@ -0,0 +1,168 @@ +Local solo mining. + +--- + +QUICK START INSTRUCTIONS (build instructions not included.) + +Get a password from bitcoin core for the RPC (remote procedure calls) to allow +the pool to talk to it. You will need bitcoin core source code. + +Within the bitcoin source code directory type the following command: +share/rpcauth/rpcauth.py ckpool + +This will give you a message such as: + +String to be appended to bitcoin.conf: +rpcauth=ckpool:c6f55b4a74b8fcbca4e8b2be22d7d53b$e2ca5e642d7ef4f43ab2524964dc6b3625ccfde09a97866c5b97c40622192149 +Your password: +sI7jIjC61U9ZYTT29GnBpm0Rg1qQV9w_TXOfBF1vOM8 + + +Edit bitcoin.conf, enabling RPC (remote procedure calls) and add the rpcauth +line above. The following would allow ckpool to talk to a bitcoin daemon running +on the same hardware: + +server=1 +rpcauth=ckpool:c6f55b4a74b8fcbca4e8b2be22d7d53b$e2ca5e642d7ef4f43ab2524964dc6b3625ccfde09a97866c5b97c40622192149 +rpcallowip=127.0.0.1 +rpcbind=127.0.0.1 + +Make sure to use the line you got when running the rpcauth command. 
+ + +Restart the bitcoin daemon with zmq messaging enabled by adding the following +to the startup command: + +-zmqpubhashblock=tcp://127.0.0.1:28332 + + +If your bitcoin daemon was built without zmq support, you can use the ckpool +notifier included by adding the following command + +-blocknotify=$CKPOOLSOURCE/src/notifier + +(Replace $CKPOOLSOURCE with the path to where you have the ckpool source code) + + +Create or modify a ckpool configuration file (such as ckpool.conf), including +the minimum necessary entries. Make sure to use the password you got in step 1. + +{ +"btcd" : [ + { + "url" : "127.0.0.1:8332", + "auth" : "ckpool", + "pass" : "sI7jIjC61U9ZYTT29GnBpm0Rg1qQV9w_TXOfBF1vOM8", + "notify" : true + } +] +} + + +Start ckpool from the source code directory (pointing to the configuration file +only if it has a different name to ckpool.conf or is placed elsewhere) in solo +mode: + +src/ckpool -B + + +Point the pools entry on your mining hardware to the local IP address where +ckpool is running on port 3333, setting a username to the bitcoin address you +wish to mine to, and put anything in the password field (such as "x") +.e.g if ckpool has a local IP address of 192.168.1.100 + +url: 192.168.1.100:3333 +username: 1PKN98VN2z5gwSGZvGKS2bj8aADZBkyhkZ +password: x + +Any valid bitcoin address will work + +(Hope for) profit. + +--- + +OPTIONAL CHANGES. + + +Most of the ckpool configuration options would not need to be modified for a +local solo mining operation, and some of the config options are not used in +solo mode. The ckpool.conf included with the source has all the available +configuration options and is not recommended to be used as is. The following +options may be useful for a local solo mining operation. + + +Mining to one fixed address. 
If you only plan to mine to one fixed address and +not have to worry about setting the username in every piece of mining hardware, +you can set a bitcoin address to mine to as follows: + +"btcaddress" : "14BMjogz69qe8hk9thyzbmR5pg34mVKB1e", + +You must then start ckpool withOUT the -B option. This would mine to the +address 14BMjogz69qe8hk9thyzbmR5pg34mVKB1e, so modify it to the bitcoin address +you wish to mine to. + + +You can set the starting diff (instead of the default 42) on the pool as +follows: + +"startdiff" : 10000, + + +You can define a signature to be mined into any blocks you solved as follows: + +"btcsig" : "/mined by ck/", + + +You may wish to enable a donation to the author of ckpool with any blocks found +as a percentage (such as 0.5%) as follows: + +"donation" : 0.5, + +Donation is completely optional and disabled by default, but most appreciated. +0.5% would be a reasonable value. + + +By default ckpool binds to every local IP address on the hardware it's run on, +but you can restrict it to certain addresses or change the port it runs on as +follows: + +"serverurl" : [ + "127.0.0.1:3333", + "192.168.1.100:3334" +], + +In addition, if you specify a port above 4000, it will become a "high diff" +port that sets the minimum difficulty to 1 million. + +You can specify a different configuration file as follows: + +src/ckpool -B -c myconfig.conf + +or you can start ckpool with a different name and it will look for the +associated configuration + +src/ckpool -B -n local + +this will look for a configuration file called local.conf + +--- + +NOTES. + +Json is very strict with its field processing although spacing is flexible. The +most common error to watch out for is to NOT put a comma after the last field. + +You can mine with a pruned blockchain if you are short on space, though it is +not recommended as it can add more latency. 
+ +Bitcoin core is NOT optimised for mining by default without modification, and +mining solo locally should be reserved as a backup operation only unless you +have the skills, hardware, and data centre quality connectivity to minimise +latency. + +Mining on testnet may create a cascade of solved competing blocks when the diff +is 1. This is normal as the default behaviour is optimised around mainnet +mining where block solving is rare. + + +Good luck. diff --git a/solo-ckpool-source/autogen.sh b/solo-ckpool-source/autogen.sh new file mode 100755 index 0000000..b483139 --- /dev/null +++ b/solo-ckpool-source/autogen.sh @@ -0,0 +1,2 @@ +#!/bin/sh +autoreconf --force --install -I m4 diff --git a/solo-ckpool-source/cknode.conf b/solo-ckpool-source/cknode.conf new file mode 100644 index 0000000..3a02557 --- /dev/null +++ b/solo-ckpool-source/cknode.conf @@ -0,0 +1,34 @@ +{ +"btcd" : [ + { + "url" : "localhost:8332", + "auth" : "user", + "pass" : "pass", + "notify" : true + }, + { + "url" : "backup:8332", + "auth" : "user", + "pass" : "pass", + "notify" : false + } +], +"proxy" : [ + { + "url" : "ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + }, + { + "url" : "backup.ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + } +], +"serverurl" : [ + "192.168.1.100:3334", + "127.0.0.1:3334" + ], +"logdir" : "logs" +} +Comments from here on are ignored. diff --git a/solo-ckpool-source/ckpassthrough.conf b/solo-ckpool-source/ckpassthrough.conf new file mode 100644 index 0000000..c5b85bc --- /dev/null +++ b/solo-ckpool-source/ckpassthrough.conf @@ -0,0 +1,15 @@ +{ +"proxy" : [ + { + "url" : "ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + } +], +"serverurl" : [ + "192.168.1.100:3334", + "127.0.0.1:3334" + ], +"logdir" : "logs" +} +Comments from here on are ignored. 
diff --git a/solo-ckpool-source/ckpool.conf b/solo-ckpool-source/ckpool.conf new file mode 100644 index 0000000..d6c9cd8 --- /dev/null +++ b/solo-ckpool-source/ckpool.conf @@ -0,0 +1,42 @@ +{ +"btcd" : [ + { + "url" : "localhost:8332", + "auth" : "user", + "pass" : "pass", + "notify" : true + }, + { + "url" : "backup:8332", + "auth" : "user", + "pass" : "pass", + "notify" : false + } +], +"upstream" : "main.ckpool.org:3336", +"btcaddress" : "14BMjogz69qe8hk9thyzbmR5pg34mVKB1e", +"btcsig" : "/mined by ck/", +"blockpoll" : 100, +"donation" : 2.0, +"nonce1length" : 4, +"nonce2length" : 8, +"update_interval" : 30, +"version_mask" : "1fffe000", +"serverurl" : [ + "ckpool.org:3333", + "node.ckpool.org:3333", + "node.ckpool.org:80" +], +"nodeserver" : [ + "ckpool.org:3335" +], +"trusted" : [ + "ckpool.org:3336" +], +"mindiff" : 1, +"startdiff" : 42, +"maxdiff" : 0, +"zmqblock" : "tcp://127.0.0.1:28332", +"logdir" : "logs" +} +Comments from here on are ignored. diff --git a/solo-ckpool-source/ckproxy.conf b/solo-ckpool-source/ckproxy.conf new file mode 100644 index 0000000..fec5783 --- /dev/null +++ b/solo-ckpool-source/ckproxy.conf @@ -0,0 +1,24 @@ +{ +"proxy" : [ + { + "url" : "ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + }, + { + "url" : "backup.ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + } +], +"update_interval" : 30, +"serverurl" : [ + "192.168.1.100:3334", + "127.0.0.1:3334" + ], +"mindiff" : 1, +"startdiff" : 42, +"maxdiff" : 0, +"logdir" : "logs" +} +Comments from here on are ignored. 
diff --git a/solo-ckpool-source/ckredirector.conf b/solo-ckpool-source/ckredirector.conf new file mode 100644 index 0000000..2c499a5 --- /dev/null +++ b/solo-ckpool-source/ckredirector.conf @@ -0,0 +1,23 @@ +{ +"proxy" : [ + { + "url" : "ckpool.org:3333", + "auth" : "user", + "pass" : "pass" + } +], +"update_interval" : 30, +"serverurl" : [ + "192.168.1.100:3334", + "127.0.0.1:3334" + ], +"redirecturl" : [ + "node1.ckpool.org:3333", + "node2.ckpool.org:3333" + ], +"mindiff" : 1, +"startdiff" : 42, +"maxdiff" : 0, +"logdir" : "logs" +} +Comments from here on are ignored. diff --git a/solo-ckpool-source/configure.ac b/solo-ckpool-source/configure.ac new file mode 100644 index 0000000..5c3bfe5 --- /dev/null +++ b/solo-ckpool-source/configure.ac @@ -0,0 +1,98 @@ +AC_INIT([ckpool],[0.9.9],[kernel@kolivas.org]) + +AC_CANONICAL_TARGET +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_SRCDIR([src/ckpool.c]) +AC_CONFIG_HEADERS([config.h]) + +AM_INIT_AUTOMAKE([foreign subdir-objects]) +m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) +AC_USE_SYSTEM_EXTENSIONS + +AC_CANONICAL_BUILD +AC_CANONICAL_HOST + +dnl Make sure anyone changing configure.ac/Makefile.am has a clue +AM_MAINTAINER_MODE + +dnl Checks for programs +AC_PROG_CC +# gl_EARLY - maybe later +AC_PROG_GCC_TRADITIONAL +AM_PROG_CC_C_O +LT_INIT([disable-shared]) + +# gl_INIT - maybe later + +dnl Checks for header files. 
+ +AC_FUNC_ALLOCA + +PKG_PROG_PKG_CONFIG() + +AC_CHECK_HEADERS(stdio.h stdlib.h fcntl.h sys/time.h unistd.h dirent.h) +AC_CHECK_HEADERS(ctype.h errno.h byteswap.h string.h time.h fenv.h) +AC_CHECK_HEADERS(endian.h sys/endian.h arpa/inet.h sys/poll.h syslog.h) +AC_CHECK_HEADERS(alloca.h pthread.h stdio.h math.h signal.h sys/prctl.h) +AC_CHECK_HEADERS(sys/types.h sys/socket.h sys/stat.h linux/un.h netdb.h) +AC_CHECK_HEADERS(stdint.h netinet/in.h netinet/tcp.h sys/ioctl.h getopt.h) +AC_CHECK_HEADERS(sys/epoll.h libpq-fe.h postgresql/libpq-fe.h grp.h) +AC_CHECK_HEADERS(gsl/gsl_math.h gsl/gsl_cdf.h) +AC_CHECK_HEADERS(openssl/x509.h openssl/hmac.h) +AC_CHECK_HEADERS(zmq.h) + +AC_CHECK_PROG(YASM, yasm, yes) +AM_CONDITIONAL([HAVE_YASM], [test x$YASM = xyes]) + +rorx= +avx1= +sse4= +if test x$YASM = xyes; then + rorx=`cat /proc/cpuinfo | grep -o -m 1 avx2` + if [test x$rorx != xavx2]; then + avx1=`cat /proc/cpuinfo | grep -o -m 1 avx` + if [test x$avx1 != xavx]; then + sse4=`cat /proc/cpuinfo | grep -o -m 1 sse4_1` + fi + fi +fi +AM_CONDITIONAL([HAVE_AVX2], [test x$rorx = xavx2]) +AM_CONDITIONAL([HAVE_AVX1], [test x$avx1 = xavx]) +AM_CONDITIONAL([HAVE_SSE4], [test x$sse4 = xsse4_1]) +if test x$rorx = xavx2; then + AC_DEFINE([USE_AVX2], [1], [Use avx2 assembly instructions for sha256]) +fi +if test x$avx1 = xavx; then + AC_DEFINE([USE_AVX1], [1], [Use avx1 assembly instructions for sha256]) +fi +if test x$sse4 = xsse4_1; then + AC_DEFINE([USE_SSE4], [1], [Use sse4 assembly instructions for sha256]) +fi + +AC_CONFIG_SUBDIRS([src/jansson-2.14]) +JANSSON_LIBS="jansson-2.14/src/.libs/libjansson.a" + +AC_SUBST(JANSSON_LIBS) + +AC_SEARCH_LIBS(clock_nanosleep, rt, , echo "Error: Required library rt not found." && exit 1) +AC_SEARCH_LIBS(exp, m, , echo "Error: Required library math not found." && exit 1) +AC_SEARCH_LIBS(pthread_mutex_trylock, pthread, , echo "Error: Required library pthreads not found." 
&& exit 1) +AC_SEARCH_LIBS(zmq_socket, zmq, ZMQ=yes, ZMQ=no) + +AC_CONFIG_FILES([Makefile src/Makefile]) +AC_OUTPUT + +LDFLAGS="${LDFLAGS} -Wl,--as-needed" + +echo +echo "Compilation............: make (or gmake)" +echo " YASM (Intel ASM).....: $YASM" +echo " ZMQ..................: $ZMQ" +echo " CPPFLAGS.............: $CPPFLAGS" +echo " CFLAGS...............: $CFLAGS" +echo " LDFLAGS..............: $LDFLAGS" +echo " LDADD................: $LIBS $JANSSON_LIBS" +echo +echo "Installation...........: make install (as root if needed, with 'su' or 'sudo')" +echo " prefix...............: $prefix" +echo diff --git a/solo-ckpool-source/m4/.gitignore b/solo-ckpool-source/m4/.gitignore new file mode 100644 index 0000000..9d5dc7f --- /dev/null +++ b/solo-ckpool-source/m4/.gitignore @@ -0,0 +1,30 @@ +*.o +*.bin + +autom4te.cache +.deps + +Makefile +Makefile.in +INSTALL +aclocal.m4 +configure +depcomp +missing +install-sh +stamp-h1 +compile +config.log +config.status +config.guess +config.sub + +*~ + +ext_deps +config.h.in +config.h + +mkinstalldirs + +*.swp diff --git a/solo-ckpool-source/src/Makefile.am b/solo-ckpool-source/src/Makefile.am new file mode 100644 index 0000000..ba0af01 --- /dev/null +++ b/solo-ckpool-source/src/Makefile.am @@ -0,0 +1,42 @@ +SUBDIRS = jansson-2.14 + +ACLOCAL_AMFLAGS = -I m4 +AM_CPPFLAGS = -I$(top_srcdir)/src/jansson-2.14/src + +native_objs := + +if HAVE_AVX2 +native_objs += sha256_code_release/sha256_avx2_rorx2.A +endif +if HAVE_AVX1 +native_objs += sha256_code_release/sha256_avx1.A +endif +if HAVE_SSE4 +native_objs += sha256_code_release/sha256_sse4.A +endif + +%.A: %.asm + yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o $@ $< + +noinst_LIBRARIES = libckpool.a +libckpool_a_SOURCES = libckpool.c libckpool.h sha2.c sha2.h sha256_code_release +libckpool_a_LIBADD = $(native_objs) + +bin_PROGRAMS = ckpool ckpmsg notifier +ckpool_SOURCES = ckpool.c ckpool.h generator.c generator.h bitcoin.c bitcoin.h \ + stratifier.c stratifier.h connector.c 
connector.h uthash.h \ + utlist.h +ckpool_LDADD = libckpool.a @JANSSON_LIBS@ @LIBS@ + +ckpmsg_SOURCES = ckpmsg.c +ckpmsg_LDADD = libckpool.a @JANSSON_LIBS@ + +notifier_SOURCES = notifier.c +notifier_LDADD = libckpool.a @JANSSON_LIBS@ + +install-exec-hook: + setcap CAP_NET_BIND_SERVICE=+eip $(bindir)/ckpool + $(LN_S) -f ckpool $(DESTDIR)$(bindir)/ckproxy + +uninstall-local: + rm -f $(bindir)/ckproxy diff --git a/solo-ckpool-source/src/bitcoin.c b/solo-ckpool-source/src/bitcoin.c new file mode 100644 index 0000000..fa2132e --- /dev/null +++ b/solo-ckpool-source/src/bitcoin.c @@ -0,0 +1,424 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include + +#include "ckpool.h" +#include "libckpool.h" +#include "bitcoin.h" +#include "stratifier.h" + +static char* understood_rules[] = {"segwit"}; + +static bool check_required_rule(const char* rule) +{ + unsigned int i; + + for (i = 0; i < sizeof(understood_rules) / sizeof(understood_rules[0]); i++) { + if (safecmp(understood_rules[i], rule) == 0) + return true; + } + return false; +} + +/* Take a bitcoin address and do some sanity checks on it, then send it to + * bitcoind to see if it's a valid address */ +bool validate_address(connsock_t *cs, const char *address, bool *script, bool *segwit) +{ + json_t *val, *res_val, *valid_val, *tmp_val; + char rpc_req[128]; + bool ret = false; + + if (unlikely(!address)) { + LOGWARNING("Null address passed to validate_address"); + return ret; + } + + snprintf(rpc_req, 128, "{\"method\": \"validateaddress\", \"params\": [\"%s\"]}\n", address); + val = json_rpc_response(cs, rpc_req); + if (!val) { + /* May get a parse error with an invalid address */ + LOGNOTICE("%s:%s Failed 
to get valid json response to validate_address %s", + cs->url, cs->port, address); + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGERR("Failed to get result json response to validate_address"); + goto out; + } + valid_val = json_object_get(res_val, "isvalid"); + if (!valid_val) { + LOGERR("Failed to get isvalid json response to validate_address"); + goto out; + } + if (!json_is_true(valid_val)) { + LOGDEBUG("Bitcoin address %s is NOT valid", address); + goto out; + } + ret = true; + tmp_val = json_object_get(res_val, "isscript"); + if (unlikely(!tmp_val)) { + /* All recent bitcoinds with wallet support built in should + * support this, if not, look for addresses the braindead way + * to tell if it's a script address. */ + LOGDEBUG("No isscript support from bitcoind"); + if (address[0] == '3' || address[0] == '2') + *script = true; + /* Now look to see this isn't a bech32: We can't support + * bech32 without knowing if it's a pubkey or a script */ + else if (address[0] != '1' && address[0] != 'm') + ret = false; + goto out; + } + *script = json_is_true(tmp_val); + tmp_val = json_object_get(res_val, "iswitness"); + if (unlikely(!tmp_val)) + goto out; + *segwit = json_is_true(tmp_val); + LOGDEBUG("Bitcoin address %s IS valid%s%s", address, *script ? " script" : "", + *segwit ? 
" segwit" : ""); +out: + if (val) + json_decref(val); + return ret; +} + +json_t *validate_txn(connsock_t *cs, const char *txn) +{ + json_t *val = NULL; + char *rpc_req; + int len; + + if (unlikely(!txn || !strlen(txn))) { + LOGWARNING("Null transaction passed to validate_txn"); + goto out; + } + len = strlen(txn) + 64; + rpc_req = ckalloc(len); + sprintf(rpc_req, "{\"method\": \"decoderawtransaction\", \"params\": [\"%s\"]}", txn); + val = json_rpc_call(cs, rpc_req); + dealloc(rpc_req); + if (!val) + LOGDEBUG("%s:%s Failed to get valid json response to decoderawtransaction", cs->url, cs->port); +out: + return val; +} + +static const char *gbt_req = "{\"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"], \"rules\" : [\"segwit\"]}]}\n"; + +/* Request getblocktemplate from bitcoind already connected with a connsock_t + * and then summarise the information to the most efficient set of data + * required to assemble a mining template, storing it in a gbtbase_t structure */ +bool gen_gbtbase(connsock_t *cs, gbtbase_t *gbt) +{ + json_t *rules_array, *coinbase_aux, *res_val, *val; + const char *previousblockhash; + char hash_swap[32], tmp[32]; + uint64_t coinbasevalue; + const char *target; + const char *flags; + const char *bits; + const char *rule; + int version; + int curtime; + int height; + int i; + bool ret = false; + + val = json_rpc_call(cs, gbt_req); + if (!val) { + LOGWARNING("%s:%s Failed to get valid json response to getblocktemplate", cs->url, cs->port); + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGWARNING("Failed to get result in json response to getblocktemplate"); + goto out; + } + + rules_array = json_object_get(res_val, "rules"); + if (rules_array) { + int rule_count = json_array_size(rules_array); + + for (i = 0; i < rule_count; i++) { + rule = json_string_value(json_array_get(rules_array, i)); + if (rule && *rule++ == '!' 
&& !check_required_rule(rule)) { + LOGERR("Required rule not understood: %s", rule); + goto out; + } + } + } + + previousblockhash = json_string_value(json_object_get(res_val, "previousblockhash")); + target = json_string_value(json_object_get(res_val, "target")); + version = json_integer_value(json_object_get(res_val, "version")); + curtime = json_integer_value(json_object_get(res_val, "curtime")); + bits = json_string_value(json_object_get(res_val, "bits")); + height = json_integer_value(json_object_get(res_val, "height")); + coinbasevalue = json_integer_value(json_object_get(res_val, "coinbasevalue")); + coinbase_aux = json_object_get(res_val, "coinbaseaux"); + flags = json_string_value(json_object_get(coinbase_aux, "flags")); + if (!flags) + flags = ""; + + if (unlikely(!previousblockhash || !target || !version || !curtime || !bits || !coinbase_aux)) { + LOGERR("JSON failed to decode GBT %s %s %d %d %s %s", previousblockhash, target, version, curtime, bits, flags); + goto out; + } + + /* Store getblocktemplate for remainder of json components as is */ + json_incref(res_val); + json_object_del(val, "result"); + gbt->json = res_val; + + hex2bin(hash_swap, previousblockhash, 32); + swap_256(tmp, hash_swap); + __bin2hex(gbt->prevhash, tmp, 32); + + strncpy(gbt->target, target, 65); + + hex2bin(hash_swap, target, 32); + bswap_256(tmp, hash_swap); + gbt->diff = diff_from_target((uchar *)tmp); + json_object_set_new_nocheck(gbt->json, "diff", json_real(gbt->diff)); + + gbt->version = version; + + gbt->curtime = curtime; + + snprintf(gbt->ntime, 9, "%08x", curtime); + json_object_set_new_nocheck(gbt->json, "ntime", json_string_nocheck(gbt->ntime)); + sscanf(gbt->ntime, "%x", &gbt->ntime32); + + snprintf(gbt->bbversion, 9, "%08x", version); + json_object_set_new_nocheck(gbt->json, "bbversion", json_string_nocheck(gbt->bbversion)); + + snprintf(gbt->nbit, 9, "%s", bits); + json_object_set_new_nocheck(gbt->json, "nbit", json_string_nocheck(gbt->nbit)); + + 
gbt->coinbasevalue = coinbasevalue; + + gbt->height = height; + + gbt->flags = strdup(flags); + + ret = true; +out: + json_decref(val); + return ret; +} + +void clear_gbtbase(gbtbase_t *gbt) +{ + free(gbt->flags); + if (gbt->json) + json_decref(gbt->json); + memset(gbt, 0, sizeof(gbtbase_t)); +} + +static const char *blockcount_req = "{\"method\": \"getblockcount\"}\n"; + +/* Request getblockcount from bitcoind, returning the count or -1 if the call + * fails. */ +int get_blockcount(connsock_t *cs) +{ + json_t *val, *res_val; + int ret = -1; + + val = json_rpc_call(cs, blockcount_req); + if (!val) { + LOGWARNING("%s:%s Failed to get valid json response to getblockcount", cs->url, cs->port); + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGWARNING("Failed to get result in json response to getblockcount"); + goto out; + } + ret = json_integer_value(res_val); +out: + json_decref(val); + return ret; +} + +/* Request getblockhash from bitcoind for height, writing the value into *hash + * which should be at least 65 bytes long since the hash is 64 chars. 
*/ +bool get_blockhash(connsock_t *cs, int height, char *hash) +{ + json_t *val, *res_val; + const char *res_ret; + char rpc_req[128]; + bool ret = false; + + sprintf(rpc_req, "{\"method\": \"getblockhash\", \"params\": [%d]}\n", height); + val = json_rpc_call(cs, rpc_req); + if (!val) { + LOGWARNING("%s:%s Failed to get valid json response to getblockhash", cs->url, cs->port); + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGWARNING("Failed to get result in json response to getblockhash"); + goto out; + } + res_ret = json_string_value(res_val); + if (!res_ret || !strlen(res_ret)) { + LOGWARNING("Got null string in result to getblockhash"); + goto out; + } + strncpy(hash, res_ret, 65); + ret = true; +out: + json_decref(val); + return ret; +} + +static const char *bestblockhash_req = "{\"method\": \"getbestblockhash\"}\n"; + +/* Request getbestblockhash from bitcoind. bitcoind 0.9+ only */ +bool get_bestblockhash(connsock_t *cs, char *hash) +{ + json_t *val, *res_val; + const char *res_ret; + bool ret = false; + + val = json_rpc_call(cs, bestblockhash_req); + if (!val) { + LOGWARNING("%s:%s Failed to get valid json response to getbestblockhash", cs->url, cs->port); + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGWARNING("Failed to get result in json response to getbestblockhash"); + goto out; + } + res_ret = json_string_value(res_val); + if (!res_ret || !strlen(res_ret)) { + LOGWARNING("Got null string in result to getbestblockhash"); + goto out; + } + strncpy(hash, res_ret, 65); + ret = true; +out: + json_decref(val); + return ret; +} + +bool submit_block(connsock_t *cs, const char *params) +{ + json_t *val, *res_val; + int len, retries = 0; + const char *res_ret; + bool ret = false; + char *rpc_req; + + len = strlen(params) + 64; +retry: + rpc_req = ckalloc(len); + sprintf(rpc_req, "{\"method\": \"submitblock\", \"params\": [\"%s\"]}\n", params); + val = json_rpc_call(cs, rpc_req); + 
dealloc(rpc_req); + if (!val) { + LOGWARNING("%s:%s Failed to get valid json response to submitblock", cs->url, cs->port); + if (++retries < 5) + goto retry; + return ret; + } + res_val = json_object_get(val, "result"); + if (!res_val) { + LOGWARNING("Failed to get result in json response to submitblock"); + if (++retries < 5) { + json_decref(val); + goto retry; + } + goto out; + } + if (!json_is_null(res_val)) { + res_ret = json_string_value(res_val); + if (res_ret && strlen(res_ret)) { + LOGWARNING("SUBMIT BLOCK RETURNED: %s", res_ret); + /* Consider duplicate response as an accepted block */ + if (safecmp(res_ret, "duplicate")) + goto out; + } else { + LOGWARNING("SUBMIT BLOCK GOT NO RESPONSE!"); + goto out; + } + } + LOGWARNING("BLOCK ACCEPTED!"); + ret = true; +out: + json_decref(val); + return ret; +} + +void precious_block(connsock_t *cs, const char *params) +{ + char *rpc_req; + int len; + + if (unlikely(!cs->alive)) { + LOGDEBUG("Failed to precious_block due to connsock dead"); + return; + } + + len = strlen(params) + 64; + rpc_req = ckalloc(len); + sprintf(rpc_req, "{\"method\": \"preciousblock\", \"params\": [\"%s\"]}\n", params); + json_rpc_msg(cs, rpc_req); + dealloc(rpc_req); +} + +void submit_txn(connsock_t *cs, const char *params) +{ + char *rpc_req; + int len; + + if (unlikely(!cs->alive)) { + LOGDEBUG("Failed to submit_txn due to connsock dead"); + return; + } + + len = strlen(params) + 64; + rpc_req = ckalloc(len); + sprintf(rpc_req, "{\"method\": \"sendrawtransaction\", \"params\": [\"%s\"]}\n", params); + json_rpc_msg(cs, rpc_req); + dealloc(rpc_req); +} + +char *get_txn(connsock_t *cs, const char *hash) +{ + char *rpc_req, *ret = NULL; + json_t *val, *res_val; + + if (unlikely(!cs->alive)) { + LOGDEBUG("Failed to get_txn due to connsock dead"); + goto out; + } + + ASPRINTF(&rpc_req, "{\"method\": \"getrawtransaction\", \"params\": [\"%s\"]}\n", hash); + val = json_rpc_response(cs, rpc_req); + dealloc(rpc_req); + if (!val) { + LOGDEBUG("%s:%s 
Failed to get valid json response to get_txn", cs->url, cs->port); + goto out; + } + res_val = json_object_get(val, "result"); + if (res_val && !json_is_null(res_val) && json_is_string(res_val)) { + ret = strdup(json_string_value(res_val)); + LOGDEBUG("get_txn for hash %s got data %s", hash, ret); + } else + LOGDEBUG("get_txn did not retrieve data for hash %s", hash); + json_decref(val); +out: + return ret; +} diff --git a/solo-ckpool-source/src/bitcoin.h b/solo-ckpool-source/src/bitcoin.h new file mode 100644 index 0000000..73013c7 --- /dev/null +++ b/solo-ckpool-source/src/bitcoin.h @@ -0,0 +1,27 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef BITCOIN_H +#define BITCOIN_H + +typedef struct genwork gbtbase_t; + +bool validate_address(connsock_t *cs, const char *address, bool *script, bool *segwit); +json_t *validate_txn(connsock_t *cs, const char *txn); +bool gen_gbtbase(connsock_t *cs, gbtbase_t *gbt); +void clear_gbtbase(gbtbase_t *gbt); +int get_blockcount(connsock_t *cs); +bool get_blockhash(connsock_t *cs, int height, char *hash); +bool get_bestblockhash(connsock_t *cs, char *hash); +bool submit_block(connsock_t *cs, const char *params); +void precious_block(connsock_t *cs, const char *params); +void submit_txn(connsock_t *cs, const char *params); +char *get_txn(connsock_t *cs, const char *hash); + +#endif /* BITCOIN_H */ diff --git a/solo-ckpool-source/src/ckpmsg.c b/solo-ckpool-source/src/ckpmsg.c new file mode 100644 index 0000000..c9763cb --- /dev/null +++ b/solo-ckpool-source/src/ckpmsg.c @@ -0,0 +1,330 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * Copyright 2014-2016 Andrew Smith + * + * This program is free software; you can redistribute it 
and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "libckpool.h" +#include "utlist.h" + +struct input_log { + struct input_log *next; + struct input_log *prev; + char *buf; +}; + +struct input_log *input_log; + +static int msg_loglevel = LOG_DEBUG; + +void logmsg(int loglevel, const char *fmt, ...) +{ + va_list ap; + char *buf; + + if (loglevel <= msg_loglevel) { + va_start(ap, fmt); + VASPRINTF(&buf, fmt, ap); + va_end(ap); + + printf("%s\n", buf); + free(buf); + } +} + +void mkstamp(char *stamp, size_t siz) +{ + long minoff, hroff; + char tzinfo[24]; + time_t now_t; + struct tm tm; + char tzch; + + now_t = time(NULL); + localtime_r(&now_t, &tm); + minoff = tm.tm_gmtoff / 60; + if (minoff < 0) { + tzch = '-'; + minoff *= -1; + } else + tzch = '+'; + hroff = minoff / 60; + if (minoff % 60) { + snprintf(tzinfo, sizeof(tzinfo), + "%c%02ld:%02ld", + tzch, hroff, minoff % 60); + } else { + snprintf(tzinfo, sizeof(tzinfo), + "%c%02ld", + tzch, hroff); + } + snprintf(stamp, siz, + "[%d-%02d-%02d %02d:%02d:%02d%s]", + tm.tm_year + 1900, + tm.tm_mon + 1, + tm.tm_mday, + tm.tm_hour, + tm.tm_min, + tm.tm_sec, + tzinfo); +} + +static struct option long_options[] = { + {"counter", no_argument, 0, 'c'}, + {"help", no_argument, 0, 'h'}, + {"loglevel", required_argument, 0, 'l'}, + {"name", required_argument, 0, 'n'}, + {"sockname", required_argument, 0, 'N'}, + {"proxy", no_argument, 0, 'p'}, + {"sockdir", required_argument, 0, 's'}, + {"timeout1", required_argument, 0, 't'}, + {"timeout2", required_argument, 0, 'T'}, + {0, 0, 0, 0} +}; + +struct termios oldctrl; + +static void sighandler(const int sig) +{ + /* Return console to its previous state */ + tcsetattr(STDIN_FILENO, TCSANOW, &oldctrl); 
+ + if (sig) { + signal (sig, SIG_DFL); + raise (sig); + } +} + +int get_line(char **buf) +{ + struct input_log *entry = NULL; + int c, len = 0, ctl1, ctl2; + struct termios ctrl; + *buf = NULL; + + /* If we're not reading from a terminal, parse lines at a time allowing + * us to script usage of ckpmsg */ + if (!isatty(fileno((FILE *)stdin))) do { + size_t n; + + dealloc(*buf); + len = getline(buf, &n, stdin); + if (len == -1) { + dealloc(*buf); + goto out; + } + len = strlen(*buf); + (*buf)[--len] = '\0'; // Strip \n + goto out; + } while (42); + + tcgetattr(STDIN_FILENO, &ctrl); + ctrl.c_lflag &= ~(ICANON | ECHO); // turn off canonical mode and echo + tcsetattr(STDIN_FILENO, TCSANOW, &ctrl); + + do { + c = getchar(); + if (c == EOF || c == '\n') + break; + if (c == 27) { + ctl1 = getchar(); + ctl2 = getchar(); + if (ctl1 != '[') + continue; + if (ctl2 < 'A' || ctl2 > 'B') + continue; + if (!input_log) + continue; + printf("\33[2K\r"); + free(*buf); + if (ctl2 == 'B') + entry = entry ? entry->prev : input_log->prev; + else + entry = entry ? 
entry->next : input_log; + *buf = strdup(entry->buf); + len = strlen(*buf); + printf("%s", *buf); + } + if (c == 127) { + if (!len) + continue; + printf("\b \b"); + (*buf)[--len] = '\0'; + continue; + } + if (c < 32 || c > 126) + continue; + len++; + realloc_strcat(buf, (char *)&c); + putchar(c); + } while (42); + + if (*buf) + len = strlen(*buf); + printf("\n"); +out: + return len; +} + +int main(int argc, char **argv) +{ + char *name = NULL, *socket_dir = NULL, *buf = NULL, *sockname = "listener"; + bool proxy = false, counter = false; + int tmo1 = RECV_UNIX_TIMEOUT1; + int tmo2 = RECV_UNIX_TIMEOUT2; + struct sigaction handler; + int c, count, i = 0, j; + char stamp[128]; + + tcgetattr(STDIN_FILENO, &oldctrl); + + while ((c = getopt_long(argc, argv, "chl:N:n:ps:t:T:", long_options, &i)) != -1) { + switch(c) { + /* You'd normally disable most logmsg with -l 3 to + * only see the counter */ + case 'c': + counter = true; + break; + case 'h': + for (j = 0; long_options[j].val; j++) { + struct option *jopt = &long_options[j]; + + if (jopt->has_arg) { + char *upper = alloca(strlen(jopt->name) + 1); + int offset = 0; + + do { + upper[offset] = toupper(jopt->name[offset]); + } while (upper[offset++] != '\0'); + printf("-%c %s | --%s %s\n", jopt->val, + upper, jopt->name, upper); + } else + printf("-%c | --%s\n", jopt->val, jopt->name); + } + exit(0); + case 'l': + msg_loglevel = atoi(optarg); + if (msg_loglevel < LOG_EMERG || + msg_loglevel > LOG_DEBUG) { + quit(1, "Invalid loglevel: %d (range %d" + " - %d)", + msg_loglevel, + LOG_EMERG, + LOG_DEBUG); + } + break; + /* Allows us to specify which process or socket to + * talk to. 
*/ + case 'N': + sockname = strdup(optarg); + break; + case 'n': + name = strdup(optarg); + break; + case 'p': + proxy = true; + break; + case 's': + socket_dir = strdup(optarg); + break; + case 't': + tmo1 = atoi(optarg); + break; + case 'T': + tmo2 = atoi(optarg); + break; + } + } + if (!socket_dir) + socket_dir = strdup("/tmp"); + trail_slash(&socket_dir); + if (!name) { + if (proxy) + name = strdup("ckproxy"); + else + name = strdup("ckpool"); + } + realloc_strcat(&socket_dir, name); + dealloc(name); + trail_slash(&socket_dir); + realloc_strcat(&socket_dir, sockname); + + signal(SIGPIPE, SIG_IGN); + handler.sa_handler = &sighandler; + handler.sa_flags = 0; + sigemptyset(&handler.sa_mask); + sigaction(SIGTERM, &handler, NULL); + sigaction(SIGINT, &handler, NULL); + sigaction(SIGQUIT, &handler, NULL); + sigaction(SIGKILL, &handler, NULL); + sigaction(SIGHUP, &handler, NULL); + + count = 0; + while (42) { + struct input_log *log_entry; + int sockd, len; + char *buf2; + + len = get_line(&buf); + if (len == -1) + break; + mkstamp(stamp, sizeof(stamp)); + if (len < 1) { + LOGERR("%s No message", stamp); + continue; + } + if (buf[0] == '#') { + LOGDEBUG("%s Got comment: %s", stamp, buf); + continue; + } + LOGDEBUG("%s Got message: %s", stamp, buf); + log_entry = ckalloc(sizeof(struct input_log)); + log_entry->buf = buf; + CDL_PREPEND(input_log, log_entry); + + sockd = open_unix_client(socket_dir); + if (sockd < 0) { + LOGERR("Failed to open socket: %s", socket_dir); + break; + } + if (!send_unix_msg(sockd, buf)) { + LOGERR("Failed to send unix msg: %s", buf); + break; + } + buf2 = recv_unix_msg_tmo2(sockd, tmo1, tmo2); + close(sockd); + if (!buf2) { + LOGERR("Received empty reply"); + continue; + } + mkstamp(stamp, sizeof(stamp)); + LOGMSGSIZ(65536, LOG_NOTICE, "%s Received response: %s", stamp, buf2); + dealloc(buf2); + + if (counter) { + if ((++count % 100) == 0) { + printf("%8d\r", count); + fflush(stdout); + } + } + } + + dealloc(socket_dir); + sighandler(0); + + 
return 0; +} diff --git a/solo-ckpool-source/src/ckpool.c b/solo-ckpool-source/src/ckpool.c new file mode 100644 index 0000000..ea99b41 --- /dev/null +++ b/solo-ckpool-source/src/ckpool.c @@ -0,0 +1,1904 @@ +/* + * Copyright 2014-2020,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ckpool.h" +#include "libckpool.h" +#include "generator.h" +#include "stratifier.h" +#include "connector.h" + +ckpool_t *global_ckp; + +static bool open_logfile(ckpool_t *ckp) +{ + if (ckp->logfd > 0) { + flock(ckp->logfd, LOCK_EX); + fflush(ckp->logfp); + Close(ckp->logfd); + } + ckp->logfp = fopen(ckp->logfilename, "ae"); + if (unlikely(!ckp->logfp)) { + LOGEMERG("Failed to make open log file %s", ckp->logfilename); + return false; + } + /* Make logging line buffered */ + setvbuf(ckp->logfp, NULL, _IOLBF, 0); + ckp->logfd = fileno(ckp->logfp); + ckp->lastopen_t = time(NULL); + return true; +} + +/* Use ckmsgqs for logging to console and files to prevent logmsg from blocking + * on any delays. 
*/ +static void console_log(ckpool_t __maybe_unused *ckp, char *msg) +{ + /* Add clear line only if stderr is going to console */ + if (isatty(fileno(stderr))) + fprintf(stderr, "\33[2K\r"); + fprintf(stderr, "%s", msg); + fflush(stderr); + + free(msg); +} + +static void proclog(ckpool_t *ckp, char *msg) +{ + time_t log_t = time(NULL); + + /* Reopen log file every minute, allowing us to move/rename it and + * create a new logfile */ + if (log_t > ckp->lastopen_t + 60) { + LOGDEBUG("Reopening logfile"); + open_logfile(ckp); + } + + flock(ckp->logfd, LOCK_EX); + fprintf(ckp->logfp, "%s", msg); + flock(ckp->logfd, LOCK_UN); + + free(msg); +} + +void get_timestamp(char *stamp) +{ + struct tm tm; + tv_t now_tv; + int ms; + + tv_time(&now_tv); + ms = (int)(now_tv.tv_usec / 1000); + localtime_r(&(now_tv.tv_sec), &tm); + sprintf(stamp, "[%d-%02d-%02d %02d:%02d:%02d.%03d]", + tm.tm_year + 1900, + tm.tm_mon + 1, + tm.tm_mday, + tm.tm_hour, + tm.tm_min, + tm.tm_sec, ms); +} + +/* Log everything to the logfile, but display warnings on the console as well */ +void logmsg(int loglevel, const char *fmt, ...) 
+{ + int logfd = global_ckp->logfd; + char *log, *buf = NULL; + char stamp[128]; + va_list ap; + + if (global_ckp->loglevel < loglevel || !fmt) + return; + + va_start(ap, fmt); + VASPRINTF(&buf, fmt, ap); + va_end(ap); + + if (unlikely(!buf)) { + fprintf(stderr, "Null buffer sent to logmsg\n"); + return; + } + if (unlikely(!strlen(buf))) { + fprintf(stderr, "Zero length string sent to logmsg\n"); + goto out; + } + get_timestamp(stamp); + if (loglevel <= LOG_ERR && errno != 0) + ASPRINTF(&log, "%s %s with errno %d: %s\n", stamp, buf, errno, strerror(errno)); + else + ASPRINTF(&log, "%s %s\n", stamp, buf); + + if (unlikely(!global_ckp->console_logger)) { + fprintf(stderr, "%s", log); + goto out_free; + } + if (loglevel <= LOG_WARNING) + ckmsgq_add(global_ckp->console_logger, strdup(log)); + if (logfd > 0) + ckmsgq_add(global_ckp->logger, strdup(log)); +out_free: + free(log); +out: + free(buf); +} + +/* Generic function for creating a message queue receiving and parsing thread */ +static void *ckmsg_queue(void *arg) +{ + ckmsgq_t *ckmsgq = (ckmsgq_t *)arg; + ckpool_t *ckp = ckmsgq->ckp; + + pthread_detach(pthread_self()); + rename_proc(ckmsgq->name); + ckmsgq->active = true; + + while (42) { + ckmsg_t *msg; + tv_t now; + ts_t abs; + + mutex_lock(ckmsgq->lock); + tv_time(&now); + tv_to_ts(&abs, &now); + abs.tv_sec++; + if (!ckmsgq->msgs) + cond_timedwait(ckmsgq->cond, ckmsgq->lock, &abs); + msg = ckmsgq->msgs; + if (msg) + DL_DELETE(ckmsgq->msgs, msg); + mutex_unlock(ckmsgq->lock); + + if (!msg) + continue; + ckmsgq->func(ckp, msg->data); + free(msg); + } + return NULL; +} + +ckmsgq_t *create_ckmsgq(ckpool_t *ckp, const char *name, const void *func) +{ + ckmsgq_t *ckmsgq = ckzalloc(sizeof(ckmsgq_t)); + + strncpy(ckmsgq->name, name, 15); + ckmsgq->func = func; + ckmsgq->ckp = ckp; + ckmsgq->lock = ckalloc(sizeof(mutex_t)); + ckmsgq->cond = ckalloc(sizeof(pthread_cond_t)); + mutex_init(ckmsgq->lock); + cond_init(ckmsgq->cond); + create_pthread(&ckmsgq->pth, ckmsg_queue, 
ckmsgq); + + return ckmsgq; +} + +ckmsgq_t *create_ckmsgqs(ckpool_t *ckp, const char *name, const void *func, const int count) +{ + ckmsgq_t *ckmsgq = ckzalloc(sizeof(ckmsgq_t) * count); + mutex_t *lock; + pthread_cond_t *cond; + int i; + + lock = ckalloc(sizeof(mutex_t)); + cond = ckalloc(sizeof(pthread_cond_t)); + mutex_init(lock); + cond_init(cond); + + for (i = 0; i < count; i++) { + snprintf(ckmsgq[i].name, 15, "%.6s%x", name, i); + ckmsgq[i].func = func; + ckmsgq[i].ckp = ckp; + ckmsgq[i].lock = lock; + ckmsgq[i].cond = cond; + create_pthread(&ckmsgq[i].pth, ckmsg_queue, &ckmsgq[i]); + } + + return ckmsgq; +} + +/* Generic function for adding messages to a ckmsgq linked list and signal the + * ckmsgq parsing thread(s) to wake up and process it. */ +bool _ckmsgq_add(ckmsgq_t *ckmsgq, void *data, const char *file, const char *func, const int line) +{ + ckmsg_t *msg; + + if (unlikely(!ckmsgq)) { + LOGWARNING("Sending messages to no queue from %s %s:%d", file, func, line); + /* Discard data if we're unlucky enough to be sending it to + * msg queues not set up during start up */ + free(data); + return false; + } + while (unlikely(!ckmsgq->active)) + cksleep_ms(10); + + msg = ckalloc(sizeof(ckmsg_t)); + msg->data = data; + + mutex_lock(ckmsgq->lock); + ckmsgq->messages++; + DL_APPEND(ckmsgq->msgs, msg); + pthread_cond_broadcast(ckmsgq->cond); + mutex_unlock(ckmsgq->lock); + + return true; +} + +/* Return whether there are any messages queued in the ckmsgq linked list. 
*/ +bool ckmsgq_empty(ckmsgq_t *ckmsgq) +{ + bool ret = true; + + if (unlikely(!ckmsgq || !ckmsgq->active)) + goto out; + + mutex_lock(ckmsgq->lock); + if (ckmsgq->msgs) + ret = (ckmsgq->msgs->next == ckmsgq->msgs->prev); + mutex_unlock(ckmsgq->lock); +out: + return ret; +} + +/* Create a standalone thread that queues received unix messages for a proc + * instance and adds them to linked list of received messages with their + * associated receive socket, then signal the associated rmsg_cond for the + * process to know we have more queued messages. The unix_msg_t ram must be + * freed by the code that removes the entry from the list. */ +static void *unix_receiver(void *arg) +{ + proc_instance_t *pi = (proc_instance_t *)arg; + int rsockd = pi->us.sockd, sockd; + char qname[16]; + + sprintf(qname, "%cunixrq", pi->processname[0]); + rename_proc(qname); + pthread_detach(pthread_self()); + + while (42) { + unix_msg_t *umsg; + char *buf; + + sockd = accept(rsockd, NULL, NULL); + if (unlikely(sockd < 0)) { + LOGEMERG("Failed to accept on %s socket, exiting", qname); + break; + } + buf = recv_unix_msg(sockd); + if (unlikely(!buf)) { + Close(sockd); + LOGWARNING("Failed to get message on %s socket", qname); + continue; + } + umsg = ckalloc(sizeof(unix_msg_t)); + umsg->sockd = sockd; + umsg->buf = buf; + + mutex_lock(&pi->rmsg_lock); + DL_APPEND(pi->unix_msgs, umsg); + pthread_cond_signal(&pi->rmsg_cond); + mutex_unlock(&pi->rmsg_lock); + } + + return NULL; +} + +/* Get the next message in the receive queue, or wait up to 5 seconds for + * the next message, returning NULL if no message is received in that time. 
*/ +unix_msg_t *get_unix_msg(proc_instance_t *pi) +{ + unix_msg_t *umsg; + + mutex_lock(&pi->rmsg_lock); + if (!pi->unix_msgs) { + tv_t now; + ts_t abs; + + tv_time(&now); + tv_to_ts(&abs, &now); + abs.tv_sec += 5; + cond_timedwait(&pi->rmsg_cond, &pi->rmsg_lock, &abs); + } + umsg = pi->unix_msgs; + if (umsg) + DL_DELETE(pi->unix_msgs, umsg); + mutex_unlock(&pi->rmsg_lock); + + return umsg; +} + +static void create_unix_receiver(proc_instance_t *pi) +{ + pthread_t pth; + + mutex_init(&pi->rmsg_lock); + cond_init(&pi->rmsg_cond); + + create_pthread(&pth, unix_receiver, pi); +} + +/* Put a sanity check on kill calls to make sure we are not sending them to + * pid 0. */ +static int kill_pid(const int pid, const int sig) +{ + if (pid < 1) + return -1; + return kill(pid, sig); +} + +static int pid_wait(const pid_t pid, const int ms) +{ + tv_t start, now; + int ret; + + tv_time(&start); + do { + ret = kill_pid(pid, 0); + if (ret) + break; + tv_time(&now); + } while (ms_tvdiff(&now, &start) < ms); + return ret; +} + +static void api_message(ckpool_t *ckp, char **buf, int *sockd) +{ + apimsg_t *apimsg = ckalloc(sizeof(apimsg_t)); + + apimsg->buf = *buf; + *buf = NULL; + apimsg->sockd = *sockd; + *sockd = -1; + ckmsgq_add(ckp->ckpapi, apimsg); +} + +/* Listen for incoming global requests. 
Always returns a response if possible */ +static void *listener(void *arg) +{ + proc_instance_t *pi = (proc_instance_t *)arg; + unixsock_t *us = &pi->us; + ckpool_t *ckp = pi->ckp; + char *buf = NULL, *msg; + int sockd; + + rename_proc(pi->sockname); +retry: + dealloc(buf); + sockd = accept(us->sockd, NULL, NULL); + if (sockd < 0) { + LOGERR("Failed to accept on socket in listener"); + goto out; + } + + buf = recv_unix_msg(sockd); + if (!buf) { + LOGWARNING("Failed to get message in listener"); + send_unix_msg(sockd, "failed"); + } else if (buf[0] == '{') { + /* Any JSON messages received are for the RPC API to handle */ + api_message(ckp, &buf, &sockd); + } else if (cmdmatch(buf, "shutdown")) { + LOGWARNING("Listener received shutdown message, terminating ckpool"); + send_unix_msg(sockd, "exiting"); + goto out; + } else if (cmdmatch(buf, "ping")) { + LOGDEBUG("Listener received ping request"); + send_unix_msg(sockd, "pong"); + } else if (cmdmatch(buf, "loglevel")) { + int loglevel; + + if (sscanf(buf, "loglevel=%d", &loglevel) != 1) { + LOGWARNING("Failed to parse loglevel message %s", buf); + send_unix_msg(sockd, "Failed"); + } else if (loglevel < LOG_EMERG || loglevel > LOG_DEBUG) { + LOGWARNING("Invalid loglevel %d sent", loglevel); + send_unix_msg(sockd, "Invalid"); + } else { + ckp->loglevel = loglevel; + send_unix_msg(sockd, "success"); + } + } else if (cmdmatch(buf, "getxfd")) { + int fdno = -1; + + sscanf(buf, "getxfd%d", &fdno); + connector_send_fd(ckp, fdno, sockd); + } else if (cmdmatch(buf, "accept")) { + LOGWARNING("Listener received accept message, accepting clients"); + send_proc(ckp->connector, "accept"); + send_unix_msg(sockd, "accepting"); + } else if (cmdmatch(buf, "reject")) { + LOGWARNING("Listener received reject message, rejecting clients"); + send_proc(ckp->connector, "reject"); + send_unix_msg(sockd, "rejecting"); + } else if (cmdmatch(buf, "reconnect")) { + LOGWARNING("Listener received request to send reconnect to clients"); + 
send_proc(ckp->stratifier, buf); + send_unix_msg(sockd, "reconnecting"); + } else if (cmdmatch(buf, "restart")) { + LOGWARNING("Listener received restart message, attempting handover"); + send_unix_msg(sockd, "restarting"); + if (!fork()) { + if (!ckp->handover) { + ckp->initial_args[ckp->args++] = strdup("-H"); + ckp->initial_args[ckp->args] = NULL; + } + execv(ckp->initial_args[0], (char *const *)ckp->initial_args); + } + } else if (cmdmatch(buf, "stratifierstats")) { + LOGDEBUG("Listener received stratifierstats request"); + msg = stratifier_stats(ckp, ckp->sdata); + send_unix_msg(sockd, msg); + dealloc(msg); + } else if (cmdmatch(buf, "connectorstats")) { + LOGDEBUG("Listener received connectorstats request"); + msg = connector_stats(ckp->cdata, 0); + send_unix_msg(sockd, msg); + dealloc(msg); + } else if (cmdmatch(buf, "resetshares")) { + LOGWARNING("Resetting best shares"); + send_proc(ckp->stratifier, buf); + send_unix_msg(sockd, "resetting"); + } else { + LOGINFO("Listener received unhandled message: %s", buf); + send_unix_msg(sockd, "unknown"); + } + Close(sockd); + goto retry; +out: + dealloc(buf); + close_unix_socket(us->sockd, us->path); + return NULL; +} + +void empty_buffer(connsock_t *cs) +{ + if (cs->buf) + cs->buf[0] = '\0'; + cs->buflen = cs->bufofs = 0; +} + +int set_sendbufsize(ckpool_t *ckp, const int fd, const int len) +{ + socklen_t optlen; + int opt; + + optlen = sizeof(opt); + opt = len * 4 / 3; + setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, optlen); + getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, &optlen); + opt /= 2; + if (opt < len) { + LOGDEBUG("Failed to set desired sendbufsize of %d unprivileged, only got %d", + len, opt); + optlen = sizeof(opt); + opt = len * 4 / 3; + setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &opt, optlen); + getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, &optlen); + opt /= 2; + } + if (opt < len) { + LOGNOTICE("Failed to increase sendbufsize to %d, increase wmem_max or start %s privileged if using a remote btcd", + len, 
ckp->name); + ckp->wmem_warn = true; + } else + LOGDEBUG("Increased sendbufsize to %d of desired %d", opt, len); + return opt; +} + +int set_recvbufsize(ckpool_t *ckp, const int fd, const int len) +{ + socklen_t optlen; + int opt; + + optlen = sizeof(opt); + opt = len * 4 / 3; + setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, optlen); + getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, &optlen); + opt /= 2; + if (opt < len) { + LOGDEBUG("Failed to set desired rcvbufsiz of %d unprivileged, only got %d", + len, opt); + optlen = sizeof(opt); + opt = len * 4 / 3; + setsockopt(fd, SOL_SOCKET, SO_RCVBUFFORCE, &opt, optlen); + getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, &optlen); + opt /= 2; + } + if (opt < len) { + LOGNOTICE("Failed to increase rcvbufsiz to %d, increase rmem_max or start %s privileged if using a remote btcd", + len, ckp->name); + ckp->rmem_warn = true; + } else + LOGDEBUG("Increased rcvbufsiz to %d of desired %d", opt, len); + return opt; +} + +/* If there is any cs->buflen it implies a full line was received on the last + * pass through read_socket_line and subsequently processed, leaving + * unprocessed data beyond cs->bufofs. Otherwise a zero buflen means there is + * only unprocessed data of bufofs length. 
*/ +static void clear_bufline(connsock_t *cs) +{ + if (unlikely(!cs->buf)) { + socklen_t optlen = sizeof(cs->rcvbufsiz); + + cs->buf = ckzalloc(PAGESIZE); + cs->bufsize = PAGESIZE; + getsockopt(cs->fd, SOL_SOCKET, SO_RCVBUF, &cs->rcvbufsiz, &optlen); + cs->rcvbufsiz /= 2; + LOGDEBUG("connsock rcvbufsiz detected as %d", cs->rcvbufsiz); + } else if (cs->buflen) { + memmove(cs->buf, cs->buf + cs->bufofs, cs->buflen); + memset(cs->buf + cs->buflen, 0, cs->bufofs); + cs->bufofs = cs->buflen; + cs->buflen = 0; + cs->buf[cs->bufofs] = '\0'; + } +} + +static void add_buflen(ckpool_t *ckp, connsock_t *cs, const char *readbuf, const int len) +{ + int backoff = 1; + int buflen; + + buflen = round_up_page(cs->bufofs + len + 1); + while (cs->bufsize < buflen) { + char *newbuf = realloc(cs->buf, buflen); + + if (likely(newbuf)) { + cs->bufsize = buflen; + cs->buf = newbuf; + break; + } + if (backoff == 1) + fprintf(stderr, "Failed to realloc %d in read_socket_line, retrying\n", (int)buflen); + cksleep_ms(backoff); + backoff <<= 1; + } + /* Increase receive buffer if possible to larger than the largest + * message we're likely to buffer */ + if (unlikely(!ckp->rmem_warn && buflen > cs->rcvbufsiz)) + cs->rcvbufsiz = set_recvbufsize(ckp, cs->fd, buflen); + + memcpy(cs->buf + cs->bufofs, readbuf, len); + cs->bufofs += len; + cs->buf[cs->bufofs] = '\0'; +} + +/* Receive as much data is currently available without blocking into a connsock + * buffer. Returns total length of data read. */ +static int recv_available(ckpool_t *ckp, connsock_t *cs) +{ + char readbuf[PAGESIZE]; + int len = 0, ret; + + do { + ret = recv(cs->fd, readbuf, PAGESIZE - 4, MSG_DONTWAIT); + if (ret > 0) { + add_buflen(ckp, cs, readbuf, ret); + len += ret; + } + } while (ret > 0); + + return len; +} + +/* Read from a socket into cs->buf till we get an '\n', converting it to '\0' + * and storing how much extra data we've received, to be moved to the beginning + * of the buffer for use on the next receive. 
Returns length of the line if a + * whole line is received, zero if none/some data is received without an EOL + * and -1 on error. */ +int read_socket_line(connsock_t *cs, float *timeout) +{ + ckpool_t *ckp = cs->ckp; + bool quiet = ckp->proxy | ckp->remote; + char *eom = NULL; + tv_t start, now; + float diff; + int ret; + + clear_bufline(cs); + recv_available(ckp, cs); // Intentionally ignore return value + eom = memchr(cs->buf, '\n', cs->bufofs); + + tv_time(&start); + + while (!eom) { + if (unlikely(cs->fd < 0)) { + ret = -1; + goto out; + } + + if (*timeout < 0) { + if (quiet) + LOGINFO("Timed out in read_socket_line"); + else + LOGERR("Timed out in read_socket_line"); + ret = 0; + goto out; + } + ret = wait_read_select(cs->fd, *timeout); + if (ret < 1) { + if (quiet) + LOGINFO("Select %s in read_socket_line", !ret ? "timed out" : "failed"); + else + LOGERR("Select %s in read_socket_line", !ret ? "timed out" : "failed"); + goto out; + } + ret = recv_available(ckp, cs); + if (ret < 1) { + /* If we have done wait_read_select there should be + * something to read and if we get nothing it means the + * socket is closed. */ + if (quiet) + LOGINFO("Failed to recv in read_socket_line"); + else + LOGERR("Failed to recv in read_socket_line"); + ret = -1; + goto out; + } + eom = memchr(cs->buf, '\n', cs->bufofs); + tv_time(&now); + diff = tvdiff(&now, &start); + copy_tv(&start, &now); + *timeout -= diff; + } + ret = eom - cs->buf; + + cs->buflen = cs->buf + cs->bufofs - eom - 1; + if (cs->buflen) + cs->bufofs = eom - cs->buf + 1; + else + cs->bufofs = 0; + *eom = '\0'; +out: + if (ret < 0) { + empty_buffer(cs); + dealloc(cs->buf); + } + return ret; +} + +/* We used to send messages between each proc_instance via unix sockets when + * ckpool was a multi-process model but that is no longer required so we can + * place the messages directly on the other proc_instance's queue until we + * deprecate this mechanism. 
*/ +void _queue_proc(proc_instance_t *pi, const char *msg, const char *file, const char *func, const int line) +{ + unix_msg_t *umsg; + + if (unlikely(!msg || !strlen(msg))) { + LOGWARNING("Null msg passed to queue_proc from %s %s:%d", file, func, line); + return; + } + umsg = ckalloc(sizeof(unix_msg_t)); + umsg->sockd = -1; + umsg->buf = strdup(msg); + + mutex_lock(&pi->rmsg_lock); + DL_APPEND(pi->unix_msgs, umsg); + pthread_cond_signal(&pi->rmsg_cond); + mutex_unlock(&pi->rmsg_lock); +} + +/* Send a single message to a process instance and retrieve the response, then + * close the socket. */ +char *_send_recv_proc(const proc_instance_t *pi, const char *msg, int writetimeout, int readtimedout, + const char *file, const char *func, const int line) +{ + char *path = pi->us.path, *buf = NULL; + int sockd; + + if (unlikely(!path || !strlen(path))) { + LOGERR("Attempted to send message %s to null path in send_proc", msg ? msg : ""); + goto out; + } + if (unlikely(!msg || !strlen(msg))) { + LOGERR("Attempted to send null message to socket %s in send_proc", path); + goto out; + } + sockd = open_unix_client(path); + if (unlikely(sockd < 0)) { + LOGWARNING("Failed to open socket %s in send_recv_proc", path); + goto out; + } + if (unlikely(!_send_unix_msg(sockd, msg, writetimeout, file, func, line))) + LOGWARNING("Failed to send %s to socket %s", msg, path); + else + buf = _recv_unix_msg(sockd, readtimedout, readtimedout, file, func, line); + Close(sockd); +out: + if (unlikely(!buf)) + LOGERR("Failure in send_recv_proc from %s %s:%d", file, func, line); + return buf; +} + +static const char *rpc_method(const char *rpc_req) +{ + const char *ptr = strchr(rpc_req, ':'); + if (ptr) + return ptr+1; + return rpc_req; +} + +/* All of these calls are made to bitcoind which prefers open/close instead + * of persistent connections so cs->fd is always invalid. 
*/ +static json_t *_json_rpc_call(connsock_t *cs, const char *rpc_req, const bool info_only) +{ + float timeout = RPC_TIMEOUT; + char *http_req = NULL; + json_error_t err_val; + char *warning = NULL; + json_t *val = NULL; + tv_t stt_tv, fin_tv; + double elapsed; + int len, ret; + + /* Serialise all calls in case we use cs from multiple threads */ + cksem_wait(&cs->sem); + cs->fd = connect_socket(cs->url, cs->port); + if (unlikely(cs->fd < 0)) { + ASPRINTF(&warning, "Unable to connect socket to %s:%s in %s", cs->url, cs->port, __func__); + goto out; + } + if (unlikely(!cs->url)) { + ASPRINTF(&warning, "No URL in %s", __func__); + goto out; + } + if (unlikely(!cs->port)) { + ASPRINTF(&warning, "No port in %s", __func__); + goto out; + } + if (unlikely(!cs->auth)) { + ASPRINTF(&warning, "No auth in %s", __func__); + goto out; + } + if (unlikely(!rpc_req)) { + ASPRINTF(&warning, "Null rpc_req passed to %s", __func__); + goto out; + } + len = strlen(rpc_req); + if (unlikely(!len)) { + ASPRINTF(&warning, "Zero length rpc_req passed to %s", __func__); + goto out; + } + http_req = ckalloc(len + 256); // Leave room for headers + sprintf(http_req, + "POST / HTTP/1.1\n" + "Authorization: Basic %s\n" + "Host: %s:%s\n" + "Content-type: application/json\n" + "Content-Length: %d\n\n%s", + cs->auth, cs->url, cs->port, len, rpc_req); + + len = strlen(http_req); + tv_time(&stt_tv); + ret = write_socket(cs->fd, http_req, len); + if (ret != len) { + tv_time(&fin_tv); + elapsed = tvdiff(&fin_tv, &stt_tv); + ASPRINTF(&warning, "Failed to write to socket in %s (%.10s...) %.3fs", + __func__, rpc_method(rpc_req), elapsed); + goto out_empty; + } + ret = read_socket_line(cs, &timeout); + if (ret < 1) { + tv_time(&fin_tv); + elapsed = tvdiff(&fin_tv, &stt_tv); + ASPRINTF(&warning, "Failed to read socket line in %s (%.10s...) 
%.3fs", + __func__, rpc_method(rpc_req), elapsed); + goto out_empty; + } + if (strncasecmp(cs->buf, "HTTP/1.1 200 OK", 15)) { + tv_time(&fin_tv); + elapsed = tvdiff(&fin_tv, &stt_tv); + ASPRINTF(&warning, "HTTP response to (%.10s...) %.3fs not ok: %s", + rpc_method(rpc_req), elapsed, cs->buf); + timeout = 0; + /* Look for a json response if there is one */ + while (read_socket_line(cs, &timeout) > 0) { + timeout = 0; + if (*cs->buf != '{') + continue; + free(warning); + /* Replace the warning with the json response */ + ASPRINTF(&warning, "JSON response to (%.10s...) %.3fs not ok: %s", + rpc_method(rpc_req), elapsed, cs->buf); + break; + } + goto out_empty; + } + do { + ret = read_socket_line(cs, &timeout); + if (ret < 1) { + tv_time(&fin_tv); + elapsed = tvdiff(&fin_tv, &stt_tv); + ASPRINTF(&warning, "Failed to read http socket lines in %s (%.10s...) %.3fs", + __func__, rpc_method(rpc_req), elapsed); + goto out_empty; + } + } while (strncmp(cs->buf, "{", 1)); + tv_time(&fin_tv); + elapsed = tvdiff(&fin_tv, &stt_tv); + if (elapsed > 5.0) { + ASPRINTF(&warning, "HTTP socket read+write took %.3fs in %s (%.10s...)", + elapsed, __func__, rpc_method(rpc_req)); + } + + val = json_loads(cs->buf, 0, &err_val); + if (!val) { + ASPRINTF(&warning, "JSON decode (%.10s...) failed(%d): %s", + rpc_method(rpc_req), err_val.line, err_val.text); + } +out_empty: + empty_socket(cs->fd); + empty_buffer(cs); +out: + if (warning) { + if (info_only) + LOGINFO("%s", warning); + else + LOGWARNING("%s", warning); + free(warning); + } + Close(cs->fd); + free(http_req); + dealloc(cs->buf); + cksem_post(&cs->sem); + return val; +} + +json_t *json_rpc_call(connsock_t *cs, const char *rpc_req) +{ + return _json_rpc_call(cs, rpc_req, false); +} + +json_t *json_rpc_response(connsock_t *cs, const char *rpc_req) +{ + return _json_rpc_call(cs, rpc_req, true); +} + +/* For when we are submitting information that is not important and don't care + * about the response. 
*/ +void json_rpc_msg(connsock_t *cs, const char *rpc_req) +{ + json_t *val = _json_rpc_call(cs, rpc_req, true); + + /* We don't care about the result */ + json_decref(val); +} + +static void terminate_oldpid(const ckpool_t *ckp, proc_instance_t *pi, const pid_t oldpid) +{ + if (!ckp->killold) { + quit(1, "Process %s pid %d still exists, start ckpool with -H to get a handover or -k if you wish to kill it", + pi->processname, oldpid); + } + LOGNOTICE("Terminating old process %s pid %d", pi->processname, oldpid); + if (kill_pid(oldpid, 15)) + quit(1, "Unable to kill old process %s pid %d", pi->processname, oldpid); + LOGWARNING("Terminating old process %s pid %d", pi->processname, oldpid); + if (pid_wait(oldpid, 500)) + return; + LOGWARNING("Old process %s pid %d failed to respond to terminate request, killing", + pi->processname, oldpid); + if (kill_pid(oldpid, 9) || !pid_wait(oldpid, 3000)) + quit(1, "Unable to kill old process %s pid %d", pi->processname, oldpid); +} + +/* This is for blocking sends of json messages */ +bool _send_json_msg(connsock_t *cs, const json_t *json_msg, const char *file, const char *func, const int line) +{ + bool ret = false; + int len, sent; + char *s; + + if (unlikely(!json_msg)) { + LOGWARNING("Empty json msg in send_json_msg from %s %s:%d", file, func, line); + goto out; + } + s = json_dumps(json_msg, JSON_ESCAPE_SLASH | JSON_EOL); + if (unlikely(!s)) { + LOGWARNING("Empty json dump in send_json_msg from %s %s:%d", file, func, line); + goto out; + } + LOGDEBUG("Sending json msg: %s", s); + len = strlen(s); + if (unlikely(!len)) { + LOGWARNING("Zero length string in send_json_msg from %s %s:%d", file, func, line); + goto out; + } + sent = write_socket(cs->fd, s, len); + dealloc(s); + if (sent != len) { + LOGNOTICE("Failed to send %d bytes sent %d in send_json_msg", len, sent); + goto out; + } + ret = true; +out: + return ret; +} + +/* Decode a string that should have a json message and return just the contents + * of the result key or 
NULL. */ +static json_t *json_result(json_t *val) +{ + json_t *res_val = NULL, *err_val; + + res_val = json_object_get(val, "result"); + /* (null) is a valid result while no value is an error, so mask out + * (null) and only handle lack of result */ + if (json_is_null(res_val)) + res_val = NULL; + else if (!res_val) { + char *ss; + + err_val = json_object_get(val, "error"); + if (err_val) + ss = json_dumps(err_val, 0); + else + ss = strdup("(unknown reason)"); + + LOGNOTICE("JSON-RPC decode of json_result failed: %s", ss); + free(ss); + } + return res_val; +} + +/* Return the error value if one exists */ +static json_t *json_errval(json_t *val) +{ + json_t *err_val = json_object_get(val, "error"); + + return err_val; +} + +/* Parse a string and return the json value it contains, if any, and the + * result in res_val. Return NULL if no result key is found. */ +json_t *json_msg_result(const char *msg, json_t **res_val, json_t **err_val) +{ + json_error_t err; + json_t *val; + + *res_val = NULL; + val = json_loads(msg, 0, &err); + if (!val) { + LOGWARNING("Json decode failed(%d): %s", err.line, err.text); + goto out; + } + *res_val = json_result(val); + *err_val = json_errval(val); + +out: + return val; +} + +/* Open the file in path, check if there is a pid in there that still exists + * and if not, write the pid into that file. 
 */
/* Record our pid in the named pidfile, first terminating any old instance
 * that failed to hand over cleanly. Returns false only if the pidfile
 * cannot be opened for writing. */
static bool write_pid(ckpool_t *ckp, const char *path, proc_instance_t *pi, const pid_t pid, const pid_t oldpid)
{
	FILE *fp;

	if (ckp->handover && oldpid && !pid_wait(oldpid, 500)) {
		LOGWARNING("Old process pid %d failed to shutdown cleanly, terminating", oldpid);
		terminate_oldpid(ckp, pi, oldpid);
	}

	/* "e" sets O_CLOEXEC so child processes don't inherit the handle */
	fp = fopen(path, "we");
	if (!fp) {
		LOGERR("Failed to open file %s", path);
		return false;
	}
	fprintf(fp, "%d", pid);
	fclose(fp);

	return true;
}

/* Build the unix socket path for pi as socket_dir + sockname */
static void name_process_sockname(unixsock_t *us, const proc_instance_t *pi)
{
	us->path = strdup(pi->ckp->socket_dir);
	realloc_strcat(&us->path, pi->sockname);
}

/* Open the listening unix socket at us->path and hand it to the configured
 * group; any failure here is fatal. */
static void open_process_sock(ckpool_t *ckp, const proc_instance_t *pi, unixsock_t *us)
{
	LOGDEBUG("Opening %s", us->path);
	us->sockd = open_unix_server(us->path);
	if (unlikely(us->sockd < 0))
		quit(1, "Failed to open %s socket", pi->sockname);
	/* -1 leaves the owning uid untouched, only the gid is changed */
	if (chown(us->path, -1, ckp->gr_gid))
		quit(1, "Failed to set %s to group id %d", us->path, ckp->gr_gid);
}

/* Name and open pi's receive socket in one step */
static void create_process_unixsock(proc_instance_t *pi)
{
	unixsock_t *us = &pi->us;
	ckpool_t *ckp = pi->ckp;

	name_process_sockname(us, pi);
	open_process_sock(ckp, pi, us);
}

/* Write this instance's pidfile (socket_dir/processname.pid); fatal on
 * failure. */
static void write_namepid(proc_instance_t *pi)
{
	char s[256];

	pi->pid = getpid();
	sprintf(s, "%s%s.pid", pi->ckp->socket_dir, pi->processname);
	if (!write_pid(pi->ckp, s, pi, pi->pid, pi->oldpid))
		quit(1, "Failed to write %s pid %d", pi->processname, pi->pid);
}

/* Remove this instance's pidfile on shutdown */
static void rm_namepid(const proc_instance_t *pi)
{
	char s[256];

	sprintf(s, "%s%s.pid", pi->ckp->socket_dir, pi->processname);
	unlink(s);
}

/* Start the asynchronous file and console logging queues */
static void launch_logger(ckpool_t *ckp)
{
	ckp->logger = create_ckmsgq(ckp, "logger", &proclog);
	ckp->console_logger = create_ckmsgq(ckp, "conlog", &console_log);
}

/* Final cleanup on orderly shutdown of the main process */
static void clean_up(ckpool_t *ckp)
{
	rm_namepid(&ckp->main);
	dealloc(ckp->socket_dir);
}

/* Cancel the thread pointed to by pth if it exists and looks started */
static void cancel_pthread(pthread_t *pth)
{
	if (!pth || !*pth)
		return;

pthread_cancel(*pth); + pth = NULL; +} + +static void sighandler(const int sig) +{ + ckpool_t *ckp = global_ckp; + + signal(sig, SIG_IGN); + signal(SIGTERM, SIG_IGN); + LOGWARNING("Process %s received signal %d, shutting down", + ckp->name, sig); + + cancel_pthread(&ckp->pth_listener); + exit(0); +} + +static bool _json_get_string(char **store, const json_t *entry, const char *res) +{ + bool ret = false; + const char *buf; + + *store = NULL; + if (!entry || json_is_null(entry)) { + LOGDEBUG("Json did not find entry %s", res); + goto out; + } + if (!json_is_string(entry)) { + LOGWARNING("Json entry %s is not a string", res); + goto out; + } + buf = json_string_value(entry); + LOGDEBUG("Json found entry %s: %s", res, buf); + *store = strdup(buf); + ret = true; +out: + return ret; +} + +bool json_get_string(char **store, const json_t *val, const char *res) +{ + return _json_get_string(store, json_object_get(val, res), res); +} + +/* Used when there must be a valid string */ +static void json_get_configstring(char **store, const json_t *val, const char *res) +{ + bool ret = _json_get_string(store, json_object_get(val, res), res); + + if (!ret) { + LOGEMERG("Invalid config string or missing object for %s", res); + exit(1); + } +} + +bool json_get_int64(int64_t *store, const json_t *val, const char *res) +{ + json_t *entry = json_object_get(val, res); + bool ret = false; + + if (!entry) { + LOGDEBUG("Json did not find entry %s", res); + goto out; + } + if (!json_is_integer(entry)) { + LOGINFO("Json entry %s is not an integer", res); + goto out; + } + *store = json_integer_value(entry); + LOGDEBUG("Json found entry %s: %"PRId64, res, *store); + ret = true; +out: + return ret; +} + +bool json_get_int(int *store, const json_t *val, const char *res) +{ + json_t *entry = json_object_get(val, res); + bool ret = false; + + if (!entry) { + LOGDEBUG("Json did not find entry %s", res); + goto out; + } + if (!json_is_integer(entry)) { + LOGWARNING("Json entry %s is not an integer", 
res);
		goto out;
	}
	*store = json_integer_value(entry);
	LOGDEBUG("Json found entry %s: %d", res, *store);
	ret = true;
out:
	return ret;
}

/* Extract key res from val as a double; false if missing or not a real.
 * NOTE(review): json_is_real rejects integer-typed json values, so "1"
 * fails here while "1.0" succeeds - confirm that is intended for config
 * fields parsed with this helper. */
bool json_get_double(double *store, const json_t *val, const char *res)
{
	json_t *entry = json_object_get(val, res);
	bool ret = false;

	if (!entry) {
		LOGDEBUG("Json did not find entry %s", res);
		goto out;
	}
	if (!json_is_real(entry)) {
		LOGWARNING("Json entry %s is not a double", res);
		goto out;
	}
	*store = json_real_value(entry);
	LOGDEBUG("Json found entry %s: %f", res, *store);
	ret = true;
out:
	return ret;
}

/* Extract key res from val as a uint32. NOTE(review): no range check -
 * values outside uint32 range are silently truncated by the assignment. */
bool json_get_uint32(uint32_t *store, const json_t *val, const char *res)
{
	json_t *entry = json_object_get(val, res);
	bool ret = false;

	if (!entry) {
		LOGDEBUG("Json did not find entry %s", res);
		goto out;
	}
	if (!json_is_integer(entry)) {
		LOGWARNING("Json entry %s is not an integer", res);
		goto out;
	}
	*store = json_integer_value(entry);
	LOGDEBUG("Json found entry %s: %u", res, *store);
	ret = true;
out:
	return ret;
}

/* Extract key res from val as a bool; false if missing or not boolean */
bool json_get_bool(bool *store, const json_t *val, const char *res)
{
	json_t *entry = json_object_get(val, res);
	bool ret = false;

	if (!entry) {
		LOGDEBUG("Json did not find entry %s", res);
		goto out;
	}
	if (!json_is_boolean(entry)) {
		LOGINFO("Json entry %s is not a boolean", res);
		goto out;
	}
	*store = json_is_true(entry);
	LOGDEBUG("Json found entry %s: %s", res, *store ?
"true" : "false"); + ret = true; +out: + return ret; +} + +bool json_getdel_int(int *store, json_t *val, const char *res) +{ + bool ret; + + ret = json_get_int(store, val, res); + if (ret) + json_object_del(val, res); + return ret; +} + +bool json_getdel_int64(int64_t *store, json_t *val, const char *res) +{ + bool ret; + + ret = json_get_int64(store, val, res); + if (ret) + json_object_del(val, res); + return ret; +} + +static void parse_btcds(ckpool_t *ckp, const json_t *arr_val, const int arr_size) +{ + json_t *val; + int i; + + ckp->btcds = arr_size; + ckp->btcdurl = ckzalloc(sizeof(char *) * arr_size); + ckp->btcdauth = ckzalloc(sizeof(char *) * arr_size); + ckp->btcdpass = ckzalloc(sizeof(char *) * arr_size); + ckp->btcdnotify = ckzalloc(sizeof(bool *) * arr_size); + for (i = 0; i < arr_size; i++) { + val = json_array_get(arr_val, i); + json_get_configstring(&ckp->btcdurl[i], val, "url"); + json_get_configstring(&ckp->btcdauth[i], val, "auth"); + json_get_configstring(&ckp->btcdpass[i], val, "pass"); + json_get_bool(&ckp->btcdnotify[i], val, "notify"); + } +} + +static void parse_proxies(ckpool_t *ckp, const json_t *arr_val, const int arr_size) +{ + json_t *val; + int i; + + ckp->proxies = arr_size; + ckp->proxyurl = ckzalloc(sizeof(char *) * arr_size); + ckp->proxyauth = ckzalloc(sizeof(char *) * arr_size); + ckp->proxypass = ckzalloc(sizeof(char *) * arr_size); + for (i = 0; i < arr_size; i++) { + val = json_array_get(arr_val, i); + json_get_configstring(&ckp->proxyurl[i], val, "url"); + json_get_configstring(&ckp->proxyauth[i], val, "auth"); + if (!json_get_string(&ckp->proxypass[i], val, "pass")) + ckp->proxypass[i] = strdup(""); + } +} + +static bool parse_serverurls(ckpool_t *ckp, const json_t *arr_val) +{ + bool ret = false; + int arr_size, i; + + if (!arr_val) + goto out; + if (!json_is_array(arr_val)) { + LOGINFO("Unable to parse serverurl entries as an array"); + goto out; + } + arr_size = json_array_size(arr_val); + if (!arr_size) { + 
LOGWARNING("Serverurl array empty"); + goto out; + } + ckp->serverurls = arr_size; + ckp->serverurl = ckalloc(sizeof(char *) * arr_size); + ckp->server_highdiff = ckzalloc(sizeof(bool) * arr_size); + ckp->nodeserver = ckzalloc(sizeof(bool) * arr_size); + ckp->trusted = ckzalloc(sizeof(bool) * arr_size); + for (i = 0; i < arr_size; i++) { + json_t *val = json_array_get(arr_val, i); + + if (!_json_get_string(&ckp->serverurl[i], val, "serverurl")) + LOGWARNING("Invalid serverurl entry number %d", i); + } + ret = true; +out: + return ret; +} + +static void parse_nodeservers(ckpool_t *ckp, const json_t *arr_val) +{ + int arr_size, i, j, total_urls; + + if (!arr_val) + return; + if (!json_is_array(arr_val)) { + LOGWARNING("Unable to parse nodeservers entries as an array"); + return; + } + arr_size = json_array_size(arr_val); + if (!arr_size) { + LOGWARNING("Nodeserver array empty"); + return; + } + total_urls = ckp->serverurls + arr_size; + ckp->serverurl = realloc(ckp->serverurl, sizeof(char *) * total_urls); + ckp->nodeserver = realloc(ckp->nodeserver, sizeof(bool) * total_urls); + ckp->trusted = realloc(ckp->trusted, sizeof(bool) * total_urls); + for (i = 0, j = ckp->serverurls; j < total_urls; i++, j++) { + json_t *val = json_array_get(arr_val, i); + + if (!_json_get_string(&ckp->serverurl[j], val, "nodeserver")) + LOGWARNING("Invalid nodeserver entry number %d", i); + ckp->nodeserver[j] = true; + ckp->nodeservers++; + } + ckp->serverurls = total_urls; +} + +static void parse_trusted(ckpool_t *ckp, const json_t *arr_val) +{ + int arr_size, i, j, total_urls; + + if (!arr_val) + return; + if (!json_is_array(arr_val)) { + LOGWARNING("Unable to parse trusted server entries as an array"); + return; + } + arr_size = json_array_size(arr_val); + if (!arr_size) { + LOGWARNING("Trusted array empty"); + return; + } + total_urls = ckp->serverurls + arr_size; + ckp->serverurl = realloc(ckp->serverurl, sizeof(char *) * total_urls); + ckp->nodeserver = realloc(ckp->nodeserver, 
sizeof(bool) * total_urls); + ckp->trusted = realloc(ckp->trusted, sizeof(bool) * total_urls); + for (i = 0, j = ckp->serverurls; j < total_urls; i++, j++) { + json_t *val = json_array_get(arr_val, i); + + if (!_json_get_string(&ckp->serverurl[j], val, "trusted")) + LOGWARNING("Invalid trusted server entry number %d", i); + ckp->trusted[j] = true; + } + ckp->serverurls = total_urls; +} + + +static bool parse_redirecturls(ckpool_t *ckp, const json_t *arr_val) +{ + bool ret = false; + int arr_size, i; + char *redirecturl, url[INET6_ADDRSTRLEN], port[8]; + redirecturl = alloca(INET6_ADDRSTRLEN); + + if (!arr_val) + goto out; + if (!json_is_array(arr_val)) { + LOGNOTICE("Unable to parse redirecturl entries as an array"); + goto out; + } + arr_size = json_array_size(arr_val); + if (!arr_size) { + LOGWARNING("redirecturl array empty"); + goto out; + } + ckp->redirecturls = arr_size; + ckp->redirecturl = ckalloc(sizeof(char *) * arr_size); + ckp->redirectport = ckalloc(sizeof(char *) * arr_size); + for (i = 0; i < arr_size; i++) { + json_t *val = json_array_get(arr_val, i); + + strncpy(redirecturl, json_string_value(val), INET6_ADDRSTRLEN - 1); + /* See that the url properly resolves */ + if (!url_from_serverurl(redirecturl, url, port)) + quit(1, "Invalid redirecturl entry %d %s", i, redirecturl); + ckp->redirecturl[i] = strdup(strsep(&redirecturl, ":")); + ckp->redirectport[i] = strdup(port); + } + ret = true; +out: + return ret; +} + + +static void parse_config(ckpool_t *ckp) +{ + json_t *json_conf, *arr_val; + json_error_t err_val; + char *url, *vmask; + int arr_size; + + json_conf = json_load_file(ckp->config, JSON_DISABLE_EOF_CHECK, &err_val); + if (!json_conf) { + LOGWARNING("Json decode error for config file %s: (%d): %s", ckp->config, + err_val.line, err_val.text); + return; + } + arr_val = json_object_get(json_conf, "btcd"); + if (arr_val && json_is_array(arr_val)) { + arr_size = json_array_size(arr_val); + if (arr_size) + parse_btcds(ckp, arr_val, arr_size); + } 
+ json_get_string(&ckp->btcaddress, json_conf, "btcaddress"); + json_get_string(&ckp->btcsig, json_conf, "btcsig"); + if (ckp->btcsig && strlen(ckp->btcsig) > 38) { + LOGWARNING("Signature %s too long, truncating to 38 bytes", ckp->btcsig); + ckp->btcsig[38] = '\0'; + } + json_get_int(&ckp->blockpoll, json_conf, "blockpoll"); + json_get_int(&ckp->nonce1length, json_conf, "nonce1length"); + json_get_int(&ckp->nonce2length, json_conf, "nonce2length"); + json_get_int(&ckp->update_interval, json_conf, "update_interval"); + json_get_string(&vmask, json_conf, "version_mask"); + if (vmask && strlen(vmask) && validhex(vmask)) + sscanf(vmask, "%x", &ckp->version_mask); + else + ckp->version_mask = 0x1fffe000; + /* Look for an array first and then a single entry */ + arr_val = json_object_get(json_conf, "serverurl"); + if (!parse_serverurls(ckp, arr_val)) { + if (json_get_string(&url, json_conf, "serverurl")) { + ckp->serverurl = ckalloc(sizeof(char *)); + ckp->serverurl[0] = url; + ckp->serverurls = 1; + } + } + arr_val = json_object_get(json_conf, "nodeserver"); + parse_nodeservers(ckp, arr_val); + arr_val = json_object_get(json_conf, "trusted"); + parse_trusted(ckp, arr_val); + json_get_string(&ckp->upstream, json_conf, "upstream"); + json_get_double(&ckp->mindiff, json_conf, "mindiff"); + json_get_double(&ckp->startdiff, json_conf, "startdiff"); + json_get_double(&ckp->highdiff, json_conf, "highdiff"); + json_get_double(&ckp->maxdiff, json_conf, "maxdiff"); + json_get_string(&ckp->logdir, json_conf, "logdir"); + json_get_int(&ckp->maxclients, json_conf, "maxclients"); + json_get_double(&ckp->donation, json_conf, "donation"); + /* Avoid dust-sized donations */ + if (ckp->donation < 0.1) + ckp->donation = 0; + else if (ckp->donation > 99.9) + ckp->donation = 99.9; + arr_val = json_object_get(json_conf, "proxy"); + if (arr_val && json_is_array(arr_val)) { + arr_size = json_array_size(arr_val); + if (arr_size) + parse_proxies(ckp, arr_val, arr_size); + } + arr_val = 
json_object_get(json_conf, "redirecturl"); + if (arr_val) + parse_redirecturls(ckp, arr_val); + json_get_string(&ckp->zmqblock, json_conf, "zmqblock"); + + json_decref(json_conf); +} + +static void manage_old_instance(ckpool_t *ckp, proc_instance_t *pi) +{ + struct stat statbuf; + char path[256]; + FILE *fp; + + sprintf(path, "%s%s.pid", pi->ckp->socket_dir, pi->processname); + if (!stat(path, &statbuf)) { + int oldpid, ret; + + LOGNOTICE("File %s exists", path); + fp = fopen(path, "re"); + if (!fp) + quit(1, "Failed to open file %s", path); + ret = fscanf(fp, "%d", &oldpid); + fclose(fp); + if (ret == 1 && !(kill_pid(oldpid, 0))) { + LOGNOTICE("Old process %s pid %d still exists", pi->processname, oldpid); + if (ckp->handover) { + LOGINFO("Saving pid to be handled at handover"); + pi->oldpid = oldpid; + return; + } + terminate_oldpid(ckp, pi, oldpid); + } + } +} + +static void prepare_child(ckpool_t *ckp, proc_instance_t *pi, void *process, char *name) +{ + pi->ckp = ckp; + pi->processname = name; + pi->sockname = pi->processname; + create_process_unixsock(pi); + create_pthread(&pi->pth_process, process, pi); + create_unix_receiver(pi); +} + +static struct option long_options[] = { + {"btcsolo", no_argument, 0, 'B'}, + {"config", required_argument, 0, 'c'}, + {"daemonise", no_argument, 0, 'D'}, + {"group", required_argument, 0, 'g'}, + {"handover", no_argument, 0, 'H'}, + {"help", no_argument, 0, 'h'}, + {"killold", no_argument, 0, 'k'}, + {"log-shares", no_argument, 0, 'L'}, + {"loglevel", required_argument, 0, 'l'}, + {"name", required_argument, 0, 'n'}, + {"node", no_argument, 0, 'N'}, + {"passthrough", no_argument, 0, 'P'}, + {"proxy", no_argument, 0, 'p'}, + {"quiet", no_argument, 0, 'q'}, + {"redirector", no_argument, 0, 'R'}, + {"sockdir", required_argument, 0, 's'}, + {"trusted", no_argument, 0, 't'}, + {"userproxy", no_argument, 0, 'u'}, + {0, 0, 0, 0} +}; + +static bool send_recv_path(const char *path, const char *msg) +{ + int sockd = 
open_unix_client(path); + bool ret = false; + char *response; + + send_unix_msg(sockd, msg); + response = recv_unix_msg(sockd); + if (response) { + ret = true; + LOGWARNING("Received: %s in response to %s request", response, msg); + dealloc(response); + } else + LOGWARNING("Received no response to %s request", msg); + Close(sockd); + return ret; +} + +int main(int argc, char **argv) +{ + struct sigaction handler; + int c, ret, i = 0, j; + char buf[512] = {}; + char *appname; + ckpool_t ckp; + + /* Make significant floating point errors fatal to avoid subtle bugs being missed */ + feenableexcept(FE_DIVBYZERO | FE_INVALID); + json_set_alloc_funcs(json_ckalloc, free); + + global_ckp = &ckp; + memset(&ckp, 0, sizeof(ckp)); + ckp.starttime = time(NULL); + ckp.startpid = getpid(); + ckp.loglevel = LOG_NOTICE; + ckp.initial_args = ckalloc(sizeof(char *) * (argc + 2)); /* Leave room for extra -H */ + for (ckp.args = 0; ckp.args < argc; ckp.args++) + ckp.initial_args[ckp.args] = strdup(argv[ckp.args]); + ckp.initial_args[ckp.args] = NULL; + + appname = basename(argv[0]); + if (!strcmp(appname, "ckproxy")) + ckp.proxy = true; + + while ((c = getopt_long(argc, argv, "Bc:Dd:g:HhkLl:Nn:PpqRS:s:tu", long_options, &i)) != -1) { + switch (c) { + case 'B': + if (ckp.proxy) + quit(1, "Cannot set both proxy and btcsolo mode"); + ckp.btcsolo = true; + break; + case 'c': + ckp.config = optarg; + break; + case 'D': + ckp.daemon = true; + break; + case 'g': + ckp.grpnam = optarg; + break; + case 'H': + ckp.handover = true; + ckp.killold = true; + break; + case 'h': + for (j = 0; long_options[j].val; j++) { + struct option *jopt = &long_options[j]; + + if (jopt->has_arg) { + char *upper = alloca(strlen(jopt->name) + 1); + int offset = 0; + + do { + upper[offset] = toupper(jopt->name[offset]); + } while (upper[offset++] != '\0'); + printf("-%c %s | --%s %s\n", jopt->val, + upper, jopt->name, upper); + } else + printf("-%c | --%s\n", jopt->val, jopt->name); + } + exit(0); + case 'k': + 
ckp.killold = true; + break; + case 'L': + ckp.logshares = true; + break; + case 'l': + ckp.loglevel = atoi(optarg); + if (ckp.loglevel < LOG_EMERG || ckp.loglevel > LOG_DEBUG) { + quit(1, "Invalid loglevel (range %d - %d): %d", + LOG_EMERG, LOG_DEBUG, ckp.loglevel); + } + break; + case 'N': + if (ckp.proxy || ckp.redirector || ckp.userproxy || ckp.passthrough) + quit(1, "Cannot set another proxy type or redirector and node mode"); + ckp.proxy = ckp.passthrough = ckp.node = true; + break; + case 'n': + ckp.name = optarg; + break; + case 'P': + if (ckp.proxy || ckp.redirector || ckp.userproxy || ckp.node) + quit(1, "Cannot set another proxy type or redirector and passthrough mode"); + ckp.proxy = ckp.passthrough = true; + break; + case 'p': + if (ckp.passthrough || ckp.redirector || ckp.userproxy || ckp.node) + quit(1, "Cannot set another proxy type or redirector and proxy mode"); + ckp.proxy = true; + break; + case 'q': + ckp.quiet = true; + break; + case 'R': + if (ckp.proxy || ckp.passthrough || ckp.userproxy || ckp.node) + quit(1, "Cannot set a proxy type or passthrough and redirector modes"); + ckp.proxy = ckp.passthrough = ckp.redirector = true; + break; + case 's': + ckp.socket_dir = strdup(optarg); + break; + case 't': + if (ckp.proxy) + quit(1, "Cannot set a proxy type and trusted remote mode"); + ckp.remote = true; + break; + case 'u': + if (ckp.proxy || ckp.redirector || ckp.passthrough || ckp.node) + quit(1, "Cannot set both userproxy and another proxy type or redirector"); + ckp.userproxy = ckp.proxy = true; + break; + } + } + + if (!ckp.name) { + if (ckp.node) + ckp.name = "cknode"; + else if (ckp.redirector) + ckp.name = "ckredirector"; + else if (ckp.passthrough) + ckp.name = "ckpassthrough"; + else if (ckp.proxy) + ckp.name = "ckproxy"; + else + ckp.name = "ckpool"; + } + snprintf(buf, 15, "%s", ckp.name); + prctl(PR_SET_NAME, buf, 0, 0, 0); + memset(buf, 0, 15); + + if (ckp.grpnam) { + struct group *group = getgrnam(ckp.grpnam); + + if (!group) + 
quit(1, "Failed to find group %s", ckp.grpnam); + ckp.gr_gid = group->gr_gid; + } else + ckp.gr_gid = getegid(); + + if (!ckp.config) { + ckp.config = strdup(ckp.name); + realloc_strcat(&ckp.config, ".conf"); + } + if (!ckp.socket_dir) { + ckp.socket_dir = strdup("/tmp/"); + realloc_strcat(&ckp.socket_dir, ckp.name); + } + trail_slash(&ckp.socket_dir); + + /* Ignore sigpipe */ + signal(SIGPIPE, SIG_IGN); + + ret = mkdir(ckp.socket_dir, 0750); + if (ret && errno != EEXIST) + quit(1, "Failed to make directory %s", ckp.socket_dir); + + parse_config(&ckp); + /* Set defaults if not found in config file */ + if (!ckp.btcds) { + ckp.btcds = 1; + ckp.btcdurl = ckzalloc(sizeof(char *)); + ckp.btcdauth = ckzalloc(sizeof(char *)); + ckp.btcdpass = ckzalloc(sizeof(char *)); + ckp.btcdnotify = ckzalloc(sizeof(bool)); + } + for (i = 0; i < ckp.btcds; i++) { + if (!ckp.btcdurl[i]) + ckp.btcdurl[i] = strdup("localhost:8332"); + if (!ckp.btcdauth[i]) + ckp.btcdauth[i] = strdup("user"); + if (!ckp.btcdpass[i]) + ckp.btcdpass[i] = strdup("pass"); + } + + ckp.donaddress = "bc1q28kkr5hk4gnqe3evma6runjrd2pvqyp8fpwfzu"; + + /* Donations on testnet are meaningless but required for complete + * testing. 
Testnet and regtest addresses */ + ckp.tndonaddress = "tb1q5fyv7tue73y4zxezh2c685qpwx0cfngfxlrgxh"; + ckp.rtdonaddress = "bcrt1qlk935ze2fsu86zjp395uvtegztrkaezawxx0wf"; + + if (!ckp.btcaddress && !ckp.btcsolo && !ckp.proxy) + quit(0, "Non solo mining must have a btcaddress in config, aborting!"); + if (!ckp.blockpoll) + ckp.blockpoll = 100; + if (!ckp.nonce1length) + ckp.nonce1length = 4; + else if (ckp.nonce1length < 2 || ckp.nonce1length > 8) + quit(0, "Invalid nonce1length %d specified, must be 2~8", ckp.nonce1length); + if (!ckp.nonce2length) { + /* nonce2length is zero by default in proxy mode */ + if (!ckp.proxy) + ckp.nonce2length = 8; + } else if (ckp.nonce2length < 2 || ckp.nonce2length > 8) + quit(0, "Invalid nonce2length %d specified, must be 2~8", ckp.nonce2length); + if (!ckp.update_interval) + ckp.update_interval = 30; + if (ckp.mindiff == 0.0) + ckp.mindiff = 1.0; + if (ckp.startdiff == 0.0) + ckp.startdiff = 42.0; + if (ckp.highdiff == 0.0) + ckp.highdiff = 1000000.0; + if (!ckp.logdir) + ckp.logdir = strdup("logs"); + if (!ckp.serverurls) + ckp.serverurl = ckzalloc(sizeof(char *)); + if (ckp.proxy && !ckp.proxies) + quit(0, "No proxy entries found in config file %s", ckp.config); + if (ckp.redirector && !ckp.redirecturls) + quit(0, "No redirect entries found in config file %s", ckp.config); + if (!ckp.zmqblock) + ckp.zmqblock = "tcp://127.0.0.1:28332"; + + /* Create the log directory */ + trail_slash(&ckp.logdir); + ret = mkdir(ckp.logdir, 0750); + if (ret && errno != EEXIST) + quit(1, "Failed to make log directory %s", ckp.logdir); + + /* Create the user logdir */ + sprintf(buf, "%s/users", ckp.logdir); + ret = mkdir(buf, 0750); + if (ret && errno != EEXIST) + quit(1, "Failed to make user log directory %s", buf); + + /* Create the pool logdir */ + sprintf(buf, "%s/pool", ckp.logdir); + ret = mkdir(buf, 0750); + if (ret && errno != EEXIST) + quit(1, "Failed to make pool log directory %s", buf); + + /* Create the logfile */ + 
ASPRINTF(&ckp.logfilename, "%s%s.log", ckp.logdir, ckp.name); + if (!open_logfile(&ckp)) + quit(1, "Failed to make open log file %s", buf); + launch_logger(&ckp); + + ckp.main.ckp = &ckp; + ckp.main.processname = strdup("main"); + ckp.main.sockname = strdup("listener"); + name_process_sockname(&ckp.main.us, &ckp.main); + ckp.oldconnfd = ckzalloc(sizeof(int *) * ckp.serverurls); + manage_old_instance(&ckp, &ckp.main); + if (ckp.handover) { + const char *path = ckp.main.us.path; + + if (send_recv_path(path, "ping")) { + for (i = 0; i < ckp.serverurls; i++) { + char oldurl[INET6_ADDRSTRLEN], oldport[8]; + char getfd[16]; + int sockd; + + snprintf(getfd, 15, "getxfd%d", i); + sockd = open_unix_client(path); + if (sockd < 1) + break; + if (!send_unix_msg(sockd, getfd)) + break; + ckp.oldconnfd[i] = get_fd(sockd); + Close(sockd); + sockd = ckp.oldconnfd[i]; + if (!sockd) + break; + if (url_from_socket(sockd, oldurl, oldport)) { + LOGWARNING("Inherited old server socket %d url %s:%s !", + i, oldurl, oldport); + } else { + LOGWARNING("Inherited old server socket %d with new file descriptor %d!", + i, ckp.oldconnfd[i]); + } + } + send_recv_path(path, "reject"); + send_recv_path(path, "reconnect"); + send_recv_path(path, "shutdown"); + } + } + + if (ckp.daemon) { + int fd; + + if (fork()) + exit(0); + setsid(); + fd = open("/dev/null",O_RDWR, 0); + if (fd != -1) { + dup2(fd, STDIN_FILENO); + dup2(fd, STDOUT_FILENO); + dup2(fd, STDERR_FILENO); + } + } + + write_namepid(&ckp.main); + open_process_sock(&ckp, &ckp.main, &ckp.main.us); + + ret = sysconf(_SC_OPEN_MAX); + if (ckp.maxclients > ret * 9 / 10) { + LOGWARNING("Cannot set maxclients to %d due to max open file limit of %d, reducing to %d", + ckp.maxclients, ret, ret * 9 / 10); + ckp.maxclients = ret * 9 / 10; + } else if (!ckp.maxclients) { + LOGNOTICE("Setting maxclients to %d due to max open file limit of %d", + ret * 9 / 10, ret); + ckp.maxclients = ret * 9 / 10; + } + + // ckp.ckpapi = create_ckmsgq(&ckp, "api", 
&ckpool_api); + create_pthread(&ckp.pth_listener, listener, &ckp.main); + + handler.sa_handler = &sighandler; + handler.sa_flags = 0; + sigemptyset(&handler.sa_mask); + sigaction(SIGTERM, &handler, NULL); + sigaction(SIGINT, &handler, NULL); + + /* Launch separate processes from here */ + prepare_child(&ckp, &ckp.generator, generator, "generator"); + prepare_child(&ckp, &ckp.stratifier, stratifier, "stratifier"); + prepare_child(&ckp, &ckp.connector, connector, "connector"); + + /* Shutdown from here if the listener is sent a shutdown message */ + if (ckp.pth_listener) + join_pthread(ckp.pth_listener); + + clean_up(&ckp); + + return 0; +} diff --git a/solo-ckpool-source/src/ckpool.h b/solo-ckpool-source/src/ckpool.h new file mode 100644 index 0000000..6ee68e8 --- /dev/null +++ b/solo-ckpool-source/src/ckpool.h @@ -0,0 +1,404 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef CKPOOL_H +#define CKPOOL_H + +#include "config.h" + +#include +#include +#include + +#include "libckpool.h" +#include "uthash.h" + +#define RPC_TIMEOUT 60 + +struct ckpool_instance; +typedef struct ckpool_instance ckpool_t; + +struct ckmsg { + struct ckmsg *next; + struct ckmsg *prev; + void *data; +}; + +typedef struct ckmsg ckmsg_t; + +typedef struct unix_msg unix_msg_t; + +struct unix_msg { + unix_msg_t *next; + unix_msg_t *prev; + int sockd; + char *buf; +}; + +struct ckmsgq { + ckpool_t *ckp; + char name[16]; + pthread_t pth; + mutex_t *lock; + pthread_cond_t *cond; + ckmsg_t *msgs; + void (*func)(ckpool_t *, void *); + int64_t messages; + bool active; +}; + +typedef struct ckmsgq ckmsgq_t; + +typedef struct proc_instance proc_instance_t; + +struct proc_instance { + ckpool_t *ckp; + unixsock_t us; + char *processname; + char *sockname; + int pid; + int oldpid; + pthread_t pth_process; + + /* Linked list of received messages, locking and conditional */ + unix_msg_t *unix_msgs; + mutex_t rmsg_lock; + pthread_cond_t rmsg_cond; +}; + +struct connsock { + int fd; + char *url; + char *port; + char *auth; + + char *buf; + int bufofs; + int buflen; + int bufsize; + int rcvbufsiz; + int sendbufsiz; + + ckpool_t *ckp; + /* Semaphore used to serialise request/responses */ + sem_t sem; + + bool alive; +}; + +typedef struct connsock connsock_t; + +typedef struct char_entry char_entry_t; + +struct char_entry { + char_entry_t *next; + char_entry_t *prev; + char *buf; +}; + +typedef struct log_entry log_entry_t; + +struct log_entry { + log_entry_t *next; + log_entry_t *prev; + char *fname; + char *buf; +}; + +struct server_instance { + /* Hash table data */ + UT_hash_handle hh; + int id; + + char *url; + char *auth; + char *pass; + bool notify; + bool alive; + connsock_t cs; +}; + +typedef struct server_instance server_instance_t; + +struct ckpool_instance { + /* Start time */ + time_t starttime; + /* Start pid */ + pid_t startpid; + /* The initial command 
line arguments */ + char **initial_args; + /* Number of arguments */ + int args; + /* Filename of config file */ + char *config; + /* Kill old instance with same name */ + bool killold; + /* Whether to log shares or not */ + bool logshares; + /* Logging level */ + int loglevel; + /* Main process name */ + char *name; + /* Directory where sockets are created */ + char *socket_dir; + /* Group ID for unix sockets */ + char *grpnam; + gid_t gr_gid; + /* Directory where logs are written */ + char *logdir; + /* Logfile */ + char *logfilename; + FILE *logfp; + int logfd; + time_t lastopen_t; + /* Connector fds if we inherit them from a running process */ + int *oldconnfd; + /* Should we inherit a running instance's socket and shut it down */ + bool handover; + /* How many clients maximum to accept before rejecting further */ + int maxclients; + + /* API message queue */ + ckmsgq_t *ckpapi; + + /* Logger message queue */ + ckmsgq_t *logger; + ckmsgq_t *console_logger; + + /* Process instance data of parent/child processes */ + proc_instance_t main; + + proc_instance_t generator; + proc_instance_t stratifier; + proc_instance_t connector; + + bool generator_ready; + bool stratifier_ready; + bool connector_ready; + + /* Name of protocol used for ZMQ block notifications */ + char *zmqblock; + + /* Threads of main process */ + pthread_t pth_listener; + pthread_t pth_watchdog; + + /* Are we running in trusted remote node mode */ + bool remote; + + /* Are we running in node proxy mode */ + bool node; + + /* Are we running in passthrough mode */ + bool passthrough; + + /* Are we a redirecting passthrough */ + bool redirector; + + /* Are we running as a proxy */ + bool proxy; + + /* Are we running in btcsolo mode */ + bool btcsolo; + + /* Are we running in userproxy mode */ + bool userproxy; + + /* Should we daemonise the ckpool process */ + bool daemon; + + /* Should we disable the throbber */ + bool quiet; + + /* Have we given warnings about the inability to raise buf sizes */ + 
bool wmem_warn; + bool rmem_warn; + + /* Bitcoind data */ + int btcds; + char **btcdurl; + char **btcdauth; + char **btcdpass; + bool *btcdnotify; + int blockpoll; // How frequently in ms to poll bitcoind for block updates + int nonce1length; // Extranonce1 length + int nonce2length; // Extranonce2 length + + /* Difficulty settings */ + double mindiff; // Default 1.0 (supports fractional values) + double startdiff; // Default 42.0 (supports fractional values) + double highdiff; // Default 1000000.0 (supports fractional values) + double maxdiff; // No default (supports fractional values) + + /* Coinbase data */ + char *btcaddress; // Address to mine to + bool script; // Address is a script address + bool segwit; // Address is a segwit address + char *btcsig; // Optional signature to add to coinbase + bool coinbase_valid; // Coinbase transaction confirmed valid + + /* Donation data */ + char *donaddress; // Donation address + char *tndonaddress; // Testnet donation address + char *rtdonaddress; // Regtest donation address + bool donscript; // Donation is a script + bool donsegwit; // Donation is segwit + bool donvalid; // Donation address works on this network + double donation; // Percentage donation to development + + /* Stratum options */ + server_instance_t **servers; + char **serverurl; // Array of URLs to bind our server/proxy to + int serverurls; // Number of server bindings + bool *server_highdiff; // If this server is highdiff + bool *nodeserver; // If this server URL serves node information + int nodeservers; // If this server has remote node servers + bool *trusted; // If this server URL accepts trusted remote nodes + char *upstream; // Upstream pool in trusted remote mode + + int update_interval; // Seconds between stratum updates + + uint32_t version_mask; // Bits which set to true means allow miner to modify those bits + + /* Proxy options */ + int proxies; + char **proxyurl; + char **proxyauth; + char **proxypass; + + /* Passthrough redirect options */ 
+ int redirecturls; + char **redirecturl; + char **redirectport; + + /* Private data for each process */ + void *gdata; + void *sdata; + void *cdata; +}; + +enum stratum_msgtype { + SM_RECONNECT = 0, + SM_DIFF, + SM_MSG, + SM_UPDATE, + SM_ERROR, + SM_SUBSCRIBE, + SM_SUBSCRIBERESULT, + SM_SHARE, + SM_SHARERESULT, + SM_AUTH, + SM_AUTHRESULT, + SM_TXNS, + SM_TXNSRESULT, + SM_PING, + SM_WORKINFO, + SM_SUGGESTDIFF, + SM_BLOCK, + SM_PONG, + SM_TRANSACTIONS, + SM_SHAREERR, + SM_WORKERSTATS, + SM_REQTXNS, + SM_CONFIGURE, + SM_NONE +}; + +static const char __maybe_unused *stratum_msgs[] = { + "reconnect", + "diff", + "message", + "update", + "error", + "subscribe", + "subscribe.result", + "share", + "share.result", + "auth", + "auth.result", + "txns", + "txns.result", + "ping", + "workinfo", + "suggestdiff", + "block", + "pong", + "transactions", + "shareerr", + "workerstats", + "reqtxns", + "mining.configure", + "" +}; + +#define SAFE_HASH_OVERHEAD(HASHLIST) (HASHLIST ? HASH_OVERHEAD(hh, HASHLIST) : 0) + +void get_timestamp(char *stamp); + +ckmsgq_t *create_ckmsgq(ckpool_t *ckp, const char *name, const void *func); +ckmsgq_t *create_ckmsgqs(ckpool_t *ckp, const char *name, const void *func, const int count); +bool _ckmsgq_add(ckmsgq_t *ckmsgq, void *data, const char *file, const char *func, const int line); +#define ckmsgq_add(ckmsgq, data) _ckmsgq_add(ckmsgq, data, __FILE__, __func__, __LINE__) +bool ckmsgq_empty(ckmsgq_t *ckmsgq); +unix_msg_t *get_unix_msg(proc_instance_t *pi); + +bool ping_main(ckpool_t *ckp); +void empty_buffer(connsock_t *cs); +int set_sendbufsize(ckpool_t *ckp, const int fd, const int len); +int set_recvbufsize(ckpool_t *ckp, const int fd, const int len); +int read_socket_line(connsock_t *cs, float *timeout); +void _queue_proc(proc_instance_t *pi, const char *msg, const char *file, const char *func, const int line); +#define send_proc(pi, msg) _queue_proc(&(pi), msg, __FILE__, __func__, __LINE__) +char *_send_recv_proc(const proc_instance_t *pi, 
const char *msg, int writetimeout, int readtimedout, + const char *file, const char *func, const int line); +#define send_recv_proc(pi, msg) _send_recv_proc(&(pi), msg, UNIX_WRITE_TIMEOUT, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__) +char *_send_recv_ckdb(const ckpool_t *ckp, const char *msg, const char *file, const char *func, const int line); +#define send_recv_ckdb(ckp, msg) _send_recv_ckdb(ckp, msg, __FILE__, __func__, __LINE__) +char *_ckdb_msg_call(const ckpool_t *ckp, const char *msg, const char *file, const char *func, + const int line); +#define ckdb_msg_call(ckp, msg) _ckdb_msg_call(ckp, msg, __FILE__, __func__, __LINE__) + +json_t *json_rpc_call(connsock_t *cs, const char *rpc_req); +json_t *json_rpc_response(connsock_t *cs, const char *rpc_req); +void json_rpc_msg(connsock_t *cs, const char *rpc_req); +bool _send_json_msg(connsock_t *cs, const json_t *json_msg, const char *file, const char *func, const int line); +#define send_json_msg(CS, JSON_MSG) _send_json_msg(CS, JSON_MSG, __FILE__, __func__, __LINE__) +json_t *json_msg_result(const char *msg, json_t **res_val, json_t **err_val); + +bool json_get_string(char **store, const json_t *val, const char *res); +bool json_get_int64(int64_t *store, const json_t *val, const char *res); +bool json_get_int(int *store, const json_t *val, const char *res); +bool json_get_double(double *store, const json_t *val, const char *res); +bool json_get_uint32(uint32_t *store, const json_t *val, const char *res); +bool json_get_bool(bool *store, const json_t *val, const char *res); +bool json_getdel_int(int *store, json_t *val, const char *res); +bool json_getdel_int64(int64_t *store, json_t *val, const char *res); + + +/* API Placeholders for future API implementation */ +typedef struct apimsg apimsg_t; + +struct apimsg { + char *buf; + int sockd; +}; + +static inline void ckpool_api(ckpool_t __maybe_unused *ckp, apimsg_t __maybe_unused *apimsg) {}; +static inline json_t *json_encode_errormsg(json_error_t 
__maybe_unused *err_val) { return NULL; }; +static inline json_t *json_errormsg(const char __maybe_unused *fmt, ...) { return NULL; }; +static inline void send_api_response(json_t __maybe_unused *val, const int __maybe_unused sockd) {}; + +/* Subclients have client_ids in the high bits. Returns the value of the parent + * client if one exists. */ +static inline int64_t subclient(const int64_t client_id) +{ + return (client_id >> 32); +} + +#endif /* CKPOOL_H */ diff --git a/solo-ckpool-source/src/connector.c b/solo-ckpool-source/src/connector.c new file mode 100644 index 0000000..2b6ea4e --- /dev/null +++ b/solo-ckpool-source/src/connector.c @@ -0,0 +1,1667 @@ +/* + * Copyright 2014-2017 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include + +#include "ckpool.h" +#include "libckpool.h" +#include "uthash.h" +#include "utlist.h" +#include "stratifier.h" +#include "generator.h" + +#define MAX_MSGSIZE 1024 + +typedef struct client_instance client_instance_t; +typedef struct sender_send sender_send_t; +typedef struct share share_t; +typedef struct redirect redirect_t; + +struct client_instance { + /* For clients hashtable */ + UT_hash_handle hh; + int64_t id; + + /* fd cannot be changed while a ref is held */ + int fd; + + /* Reference count for when this instance is used outside of the + * connector_data lock */ + int ref; + + /* Have we disabled this client to be removed when there are no refs? 
*/ + bool invalid; + + /* For dead_clients list */ + client_instance_t *dead_next; + client_instance_t *dead_prev; + + client_instance_t *recycled_next; + client_instance_t *recycled_prev; + + + struct sockaddr_storage address_storage; + struct sockaddr *address; + char address_name[INET6_ADDRSTRLEN]; + + /* Which serverurl is this instance connected to */ + int server; + + char *buf; + unsigned long bufofs; + + /* Are we currently sending a blocked message from this client */ + sender_send_t *sending; + + /* Is this a trusted remote server */ + bool remote; + + /* Is this the parent passthrough client */ + bool passthrough; + + /* Linked list of shares in redirector mode.*/ + share_t *shares; + + /* Has this client already been told to redirect */ + bool redirected; + /* Has this client been authorised in redirector mode */ + bool authorised; + + /* Time this client started blocking, 0 when not blocked */ + time_t blocked_time; + + /* The size of the socket send buffer */ + int sendbufsize; +}; + +struct sender_send { + struct sender_send *next; + struct sender_send *prev; + + client_instance_t *client; + char *buf; + int len; + int ofs; +}; + +struct share { + share_t *next; + share_t *prev; + + time_t submitted; + int64_t id; +}; + +struct redirect { + UT_hash_handle hh; + char address_name[INET6_ADDRSTRLEN]; + int id; + int redirect_no; +}; + +/* Private data for the connector */ +struct connector_data { + ckpool_t *ckp; + cklock_t lock; + proc_instance_t *pi; + + time_t start_time; + + /* Array of server fds */ + int *serverfd; + /* All time count of clients connected */ + int nfds; + /* The epoll fd */ + int epfd; + + bool accept; + pthread_t pth_sender; + pthread_t pth_receiver; + + /* For the hashtable of all clients */ + client_instance_t *clients; + /* Linked list of dead clients no longer in use but may still have references */ + client_instance_t *dead_clients; + /* Linked list of client structures we can reuse */ + client_instance_t *recycled_clients; 
+ + int clients_generated; + int dead_generated; + + int64_t client_ids; + + /* client message process queue */ + ckmsgq_t *cmpq; + + /* client message event process queue */ + ckmsgq_t *cevents; + + /* For the linked list of pending sends */ + sender_send_t *sender_sends; + + int64_t sends_generated; + int64_t sends_delayed; + int64_t sends_queued; + int64_t sends_size; + + /* For protecting the pending sends list */ + mutex_t sender_lock; + pthread_cond_t sender_cond; + + /* Hash list of all redirected IP address in redirector mode */ + redirect_t *redirects; + /* What redirect we're currently up to */ + int redirect; + + /* Pending sends to the upstream server */ + ckmsgq_t *upstream_sends; + connsock_t upstream_cs; + + /* Have we given the warning about inability to raise sendbuf size */ + bool wmem_warn; +}; + +typedef struct connector_data cdata_t; + +void connector_upstream_msg(ckpool_t *ckp, char *msg) +{ + cdata_t *cdata = ckp->cdata; + + LOGDEBUG("Upstreaming %s", msg); + ckmsgq_add(cdata->upstream_sends, msg); +} + +/* Increase the reference count of instance */ +static void __inc_instance_ref(client_instance_t *client) +{ + client->ref++; +} + +static void inc_instance_ref(cdata_t *cdata, client_instance_t *client) +{ + ck_wlock(&cdata->lock); + __inc_instance_ref(client); + ck_wunlock(&cdata->lock); +} + +/* Increase the reference count of instance */ +static void __dec_instance_ref(client_instance_t *client) +{ + client->ref--; +} + +static void dec_instance_ref(cdata_t *cdata, client_instance_t *client) +{ + ck_wlock(&cdata->lock); + __dec_instance_ref(client); + ck_wunlock(&cdata->lock); +} + +/* Recruit a client structure from a recycled one if available, creating a + * new structure only if we have none to reuse. 
*/ +static client_instance_t *recruit_client(cdata_t *cdata) +{ + client_instance_t *client = NULL; + + ck_wlock(&cdata->lock); + if (cdata->recycled_clients) { + client = cdata->recycled_clients; + DL_DELETE2(cdata->recycled_clients, client, recycled_prev, recycled_next); + } else + cdata->clients_generated++; + ck_wunlock(&cdata->lock); + + if (!client) { + LOGDEBUG("Connector created new client instance"); + client = ckzalloc(sizeof(client_instance_t)); + } else + LOGDEBUG("Connector recycled client instance"); + + client->buf = ckzalloc(PAGESIZE); + + return client; +} + +static void __recycle_client(cdata_t *cdata, client_instance_t *client) +{ + dealloc(client->buf); + memset(client, 0, sizeof(client_instance_t)); + client->id = -1; + DL_APPEND2(cdata->recycled_clients, client, recycled_prev, recycled_next); +} + +static void recycle_client(cdata_t *cdata, client_instance_t *client) +{ + ck_wlock(&cdata->lock); + __recycle_client(cdata, client); + ck_wunlock(&cdata->lock); +} + +/* Allows the stratifier to get a unique local virtualid for subclients */ +int64_t connector_newclientid(ckpool_t *ckp) +{ + int64_t ret; + + cdata_t *cdata = ckp->cdata; + + ck_wlock(&cdata->lock); + ret = cdata->client_ids++; + ck_wunlock(&cdata->lock); + + return ret; +} + +/* Accepts incoming connections on the server socket and generates client + * instances */ +static int accept_client(cdata_t *cdata, const int epfd, const uint64_t server) +{ + int fd, port, no_clients, sockd; + ckpool_t *ckp = cdata->ckp; + client_instance_t *client; + struct epoll_event event; + socklen_t address_len; + socklen_t optlen; + + ck_rlock(&cdata->lock); + no_clients = HASH_COUNT(cdata->clients); + ck_runlock(&cdata->lock); + + if (unlikely(ckp->maxclients && no_clients >= ckp->maxclients)) { + LOGWARNING("Server full with %d clients", no_clients); + return 0; + } + + sockd = cdata->serverfd[server]; + client = recruit_client(cdata); + client->server = server; + client->address = (struct sockaddr 
*)&client->address_storage; + address_len = sizeof(client->address_storage); + fd = accept(sockd, client->address, &address_len); + if (unlikely(fd < 0)) { + /* Handle these errors gracefully should we ever share this + * socket */ + if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) { + LOGERR("Recoverable error on accept in accept_client"); + return 0; + } + LOGERR("Failed to accept on socket %d in acceptor", sockd); + recycle_client(cdata, client); + return -1; + } + + switch (client->address->sa_family) { + const struct sockaddr_in *inet4_in; + const struct sockaddr_in6 *inet6_in; + + case AF_INET: + inet4_in = (struct sockaddr_in *)client->address; + inet_ntop(AF_INET, &inet4_in->sin_addr, client->address_name, INET6_ADDRSTRLEN); + port = htons(inet4_in->sin_port); + break; + case AF_INET6: + inet6_in = (struct sockaddr_in6 *)client->address; + inet_ntop(AF_INET6, &inet6_in->sin6_addr, client->address_name, INET6_ADDRSTRLEN); + port = htons(inet6_in->sin6_port); + break; + default: + LOGWARNING("Unknown INET type for client %d on socket %d", + cdata->nfds, fd); + Close(fd); + recycle_client(cdata, client); + return 0; + } + + keep_sockalive(fd); + noblock_socket(fd); + + LOGINFO("Connected new client %d on socket %d to %d active clients from %s:%d", + cdata->nfds, fd, no_clients, client->address_name, port); + + ck_wlock(&cdata->lock); + client->id = cdata->client_ids++; + HASH_ADD_I64(cdata->clients, id, client); + cdata->nfds++; + ck_wunlock(&cdata->lock); + + /* We increase the ref count on this client as epoll creates a pointer + * to it. We drop that reference when the socket is closed which + * removes it automatically from the epoll list. 
*/ + __inc_instance_ref(client); + client->fd = fd; + optlen = sizeof(client->sendbufsize); + getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &client->sendbufsize, &optlen); + LOGDEBUG("Client sendbufsize detected as %d", client->sendbufsize); + + event.data.u64 = client->id; + event.events = EPOLLIN | EPOLLRDHUP | EPOLLONESHOT; + if (unlikely(epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event) < 0)) { + LOGERR("Failed to epoll_ctl add in accept_client"); + dec_instance_ref(cdata, client); + return 0; + } + + return 1; +} + +static int __drop_client(cdata_t *cdata, client_instance_t *client) +{ + int ret = -1; + + if (client->invalid) + goto out; + client->invalid = true; + ret = client->fd; + /* Closing the fd will automatically remove it from the epoll list */ + Close(client->fd); + HASH_DEL(cdata->clients, client); + DL_APPEND2(cdata->dead_clients, client, dead_prev, dead_next); + /* This is the reference to this client's presence in the + * epoll list. */ + __dec_instance_ref(client); + cdata->dead_generated++; +out: + return ret; +} + +static void stratifier_drop_id(ckpool_t *ckp, const int64_t id) +{ + char buf[256]; + + sprintf(buf, "dropclient=%"PRId64, id); + send_proc(ckp->stratifier, buf); +} + +/* Client must hold a reference count */ +static int drop_client(cdata_t *cdata, client_instance_t *client) +{ + bool passthrough = client->passthrough, remote = client->remote; + char address_name[INET6_ADDRSTRLEN]; + int64_t client_id = client->id; + int fd = -1; + + strcpy(address_name, client->address_name); + ck_wlock(&cdata->lock); + fd = __drop_client(cdata, client); + ck_wunlock(&cdata->lock); + + if (fd > -1) { + if (passthrough) { + LOGNOTICE("Connector dropped passthrough %"PRId64" %s", + client_id, address_name); + } else if (remote) { + LOGWARNING("Remote trusted server client %"PRId64" %s disconnected", + client_id, address_name); + } + LOGDEBUG("Connector dropped fd %d", fd); + stratifier_drop_id(cdata->ckp, client_id); + } + + return fd; +} + +/* For sending the 
drop command to the upstream pool in passthrough mode */ +static void generator_drop_client(ckpool_t *ckp, const client_instance_t *client) +{ + json_t *val; + + JSON_CPACK(val, "{si,sI:ss:si:ss:s[]}", "id", 42, "client_id", client->id, "address", + client->address_name, "server", client->server, "method", "mining.term", + "params"); + generator_add_send(ckp, val); +} + +static void stratifier_drop_client(ckpool_t *ckp, const client_instance_t *client) +{ + stratifier_drop_id(ckp, client->id); +} + +/* Invalidate this instance. Remove them from the hashtables we look up + * regularly but keep the instances in a linked list until their ref count + * drops to zero when we can remove them lazily. Client must hold a reference + * count. */ +static int invalidate_client(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) +{ + client_instance_t *tmp; + int ret; + + ret = drop_client(cdata, client); + if ((!ckp->passthrough || ckp->node) && !client->passthrough) + stratifier_drop_client(ckp, client); + if (ckp->passthrough) + generator_drop_client(ckp, client); + + /* Cull old unused clients lazily when there are no more reference + * counts for them. */ + ck_wlock(&cdata->lock); + DL_FOREACH_SAFE2(cdata->dead_clients, client, tmp, dead_next) { + if (!client->ref) { + DL_DELETE2(cdata->dead_clients, client, dead_prev, dead_next); + LOGINFO("Connector recycling client %"PRId64, client->id); + /* We only close the client fd once we're sure there + * are no references to it left to prevent fds being + * reused on new and old clients. 
*/ + nolinger_socket(client->fd); + Close(client->fd); + __recycle_client(cdata, client); + } + } + ck_wunlock(&cdata->lock); + + return ret; +} + +static void drop_all_clients(cdata_t *cdata) +{ + client_instance_t *client, *tmp; + + ck_wlock(&cdata->lock); + HASH_ITER(hh, cdata->clients, client, tmp) { + __drop_client(cdata, client); + } + ck_wunlock(&cdata->lock); +} + +static void send_client(ckpool_t *ckp, cdata_t *cdata, int64_t id, char *buf); + +/* Look for shares being submitted via a redirector and add them to a linked + * list for looking up the responses. */ +static void parse_redirector_share(cdata_t *cdata, client_instance_t *client, const json_t *val) +{ + share_t *share, *tmp; + time_t now; + int64_t id; + + if (!json_get_int64(&id, val, "id")) { + LOGNOTICE("Failed to find redirector share id"); + return; + } + share = ckzalloc(sizeof(share_t)); + now = time(NULL); + share->submitted = now; + share->id = id; + + LOGINFO("Redirector adding client %"PRId64" share id: %"PRId64, client->id, id); + + /* We use the cdata lock instead of a separate lock since this function + * is called infrequently. */ + ck_wlock(&cdata->lock); + DL_APPEND(client->shares, share); + + /* Age old shares. */ + DL_FOREACH_SAFE(client->shares, share, tmp) { + if (now > share->submitted + 120) { + DL_DELETE(client->shares, share); + dealloc(share); + } + } + ck_wunlock(&cdata->lock); +} + +/* Client is holding a reference count from being on the epoll list. Returns + * true if we will still be receiving messages from this client. 
*/ +static bool parse_client_msg(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) +{ + int buflen, ret; + json_t *val; + char *eol; + +retry: + if (unlikely(client->bufofs > MAX_MSGSIZE)) { + if (!client->remote) { + LOGNOTICE("Client id %"PRId64" fd %d overloaded buffer without EOL, disconnecting", + client->id, client->fd); + return false; + } + client->buf = realloc(client->buf, round_up_page(client->bufofs + MAX_MSGSIZE + 1)); + } + /* This read call is non-blocking since the socket is set to O_NOBLOCK */ + ret = read(client->fd, client->buf + client->bufofs, MAX_MSGSIZE); + if (ret < 1) { + if (likely(errno == EAGAIN || errno == EWOULDBLOCK || !ret)) + return true; + LOGINFO("Client id %"PRId64" fd %d disconnected - recv fail with bufofs %lu ret %d errno %d %s", + client->id, client->fd, client->bufofs, ret, errno, ret && errno ? strerror(errno) : ""); + return false; + } + client->bufofs += ret; +reparse: + eol = memchr(client->buf, '\n', client->bufofs); + if (!eol) + goto retry; + + /* Do something useful with this message now */ + buflen = eol - client->buf + 1; + if (unlikely(buflen > MAX_MSGSIZE && !client->remote)) { + LOGNOTICE("Client id %"PRId64" fd %d message oversize, disconnecting", client->id, client->fd); + return false; + } + + if (!(val = json_loads(client->buf, JSON_DISABLE_EOF_CHECK, NULL))) { + char *buf = strdup("Invalid JSON, disconnecting\n"); + + LOGINFO("Client id %"PRId64" sent invalid json message %s", client->id, client->buf); + send_client(ckp, cdata, client->id, buf); + return false; + } else { + if (client->passthrough) { + int64_t passthrough_id; + + json_getdel_int64(&passthrough_id, val, "client_id"); + passthrough_id = (client->id << 32) | passthrough_id; + json_object_set_new_nocheck(val, "client_id", json_integer(passthrough_id)); + } else { + if (ckp->redirector && !client->redirected && strstr(client->buf, "mining.submit")) + parse_redirector_share(cdata, client, val); + json_object_set_new_nocheck(val, 
"client_id", json_integer(client->id)); + json_object_set_new_nocheck(val, "address", json_string(client->address_name)); + } + json_object_set_new_nocheck(val, "server", json_integer(client->server)); + + /* Do not send messages of clients we've already dropped. We + * do this unlocked as the occasional false negative can be + * filtered by the stratifier. */ + if (likely(!client->invalid)) { + if (!ckp->passthrough) + stratifier_add_recv(ckp, val); + if (ckp->node) + stratifier_add_recv(ckp, json_deep_copy(val)); + if (ckp->passthrough) + generator_add_send(ckp, val); + } else + json_decref(val); + } + client->bufofs -= buflen; + if (client->bufofs) + memmove(client->buf, client->buf + buflen, client->bufofs); + client->buf[client->bufofs] = '\0'; + + if (client->bufofs) + goto reparse; + goto retry; +} + +static client_instance_t *ref_client_by_id(cdata_t *cdata, int64_t id) +{ + client_instance_t *client; + + ck_wlock(&cdata->lock); + HASH_FIND_I64(cdata->clients, &id, client); + if (client) { + if (!client->invalid) + __inc_instance_ref(client); + else + client = NULL; + } + ck_wunlock(&cdata->lock); + + return client; +} + +static void redirect_client(ckpool_t *ckp, client_instance_t *client); + +static bool redirect_matches(cdata_t *cdata, client_instance_t *client) +{ + redirect_t *redirect; + + ck_rlock(&cdata->lock); + HASH_FIND_STR(cdata->redirects, client->address_name, redirect); + ck_runlock(&cdata->lock); + + return redirect; +} + +static void client_event_processor(ckpool_t *ckp, struct epoll_event *event) +{ + const uint32_t events = event->events; + const uint64_t id = event->data.u64; + cdata_t *cdata = ckp->cdata; + client_instance_t *client; + + client = ref_client_by_id(cdata, id); + if (unlikely(!client)) { + LOGNOTICE("Failed to find client by id %"PRId64" in receiver!", id); + goto outnoclient; + } + /* We can have both messages and read hang ups so process the + * message first. 
*/ + if (likely(events & EPOLLIN)) { + /* Rearm the client for epoll events if we have successfully + * parsed a message from it */ + if (unlikely(!parse_client_msg(ckp, cdata, client))) { + invalidate_client(ckp, cdata, client); + goto out; + } + } + if (unlikely(events & EPOLLERR)) { + socklen_t errlen = sizeof(int); + int error = 0; + + /* See what type of error this is and raise the log + * level of the message if it's unexpected. */ + getsockopt(client->fd, SOL_SOCKET, SO_ERROR, (void *)&error, &errlen); + if (error != 104) { + LOGNOTICE("Client id %"PRId64" fd %d epollerr HUP in epoll with errno %d: %s", + client->id, client->fd, error, strerror(error)); + } else { + LOGINFO("Client id %"PRId64" fd %d epollerr HUP in epoll with errno %d: %s", + client->id, client->fd, error, strerror(error)); + } + invalidate_client(cdata->pi->ckp, cdata, client); + } else if (unlikely(events & EPOLLHUP)) { + /* Client connection reset by peer */ + LOGINFO("Client id %"PRId64" fd %d HUP in epoll", client->id, client->fd); + invalidate_client(cdata->pi->ckp, cdata, client); + } else if (unlikely(events & EPOLLRDHUP)) { + /* Client disconnected by peer */ + LOGINFO("Client id %"PRId64" fd %d RDHUP in epoll", client->id, client->fd); + invalidate_client(cdata->pi->ckp, cdata, client); + } +out: + if (likely(!client->invalid)) { + /* Rearm the fd in the epoll list if it's still active */ + event->data.u64 = id; + event->events = EPOLLIN | EPOLLRDHUP | EPOLLONESHOT; + epoll_ctl(cdata->epfd, EPOLL_CTL_MOD, client->fd, event); + } + dec_instance_ref(cdata, client); +outnoclient: + free(event); +} + +/* Waits on fds ready to read on from the list stored in conn_instance and + * handles the incoming messages */ +static void *receiver(void *arg) +{ + cdata_t *cdata = (cdata_t *)arg; + struct epoll_event *event = ckzalloc(sizeof(struct epoll_event)); + ckpool_t *ckp = cdata->ckp; + uint64_t serverfds, i; + int ret, epfd; + + rename_proc("creceiver"); + + epfd = cdata->epfd = 
epoll_create1(EPOLL_CLOEXEC); + if (epfd < 0) { + LOGEMERG("FATAL: Failed to create epoll in receiver"); + goto out; + } + serverfds = ckp->serverurls; + /* Add all the serverfds to the epoll */ + for (i = 0; i < serverfds; i++) { + /* The small values will be less than the first client ids */ + event->data.u64 = i; + event->events = EPOLLIN | EPOLLRDHUP; + ret = epoll_ctl(epfd, EPOLL_CTL_ADD, cdata->serverfd[i], event); + if (ret < 0) { + LOGEMERG("FATAL: Failed to add epfd %d to epoll_ctl", epfd); + goto out; + } + } + + /* Wait for the stratifier to be ready for us */ + while (!ckp->stratifier_ready) + cksleep_ms(10); + + while (42) { + uint64_t edu64; + + while (unlikely(!cdata->accept)) + cksleep_ms(10); + ret = epoll_wait(epfd, event, 1, 1000); + if (unlikely(ret < 1)) { + if (unlikely(ret == -1)) { + LOGEMERG("FATAL: Failed to epoll_wait in receiver"); + break; + } + /* Nothing to service, still very unlikely */ + continue; + } + edu64 = event->data.u64; + if (edu64 < serverfds) { + ret = accept_client(cdata, epfd, edu64); + if (unlikely(ret < 0)) { + LOGEMERG("FATAL: Failed to accept_client in receiver"); + break; + } + continue; + } + /* Event structure is handed off to client_event_processor + * here to be freed so we need to allocate a new one */ + ckmsgq_add(cdata->cevents, event); + event = ckzalloc(sizeof(struct epoll_event)); + } +out: + /* We shouldn't get here unless there's an error */ + return NULL; +} + +/* Send a sender_send message and return true if we've finished sending it or + * are unable to send any more. 
*/ +static bool send_sender_send(ckpool_t *ckp, cdata_t *cdata, sender_send_t *sender_send) +{ + client_instance_t *client = sender_send->client; + time_t now_t; + + if (unlikely(client->invalid)) + goto out_true; + + /* Make sure we only send one message at a time to each client */ + if (unlikely(client->sending && client->sending != sender_send)) + return false; + + client->sending = sender_send; + now_t = time(NULL); + + /* Increase sendbufsize to match large messages sent to clients - this + * usually only applies to clients as mining nodes. */ + if (unlikely(!ckp->wmem_warn && sender_send->len > client->sendbufsize)) + client->sendbufsize = set_sendbufsize(ckp, client->fd, sender_send->len); + + while (sender_send->len) { + int ret = write(client->fd, sender_send->buf + sender_send->ofs, sender_send->len); + + if (ret < 1) { + /* Invalidate clients that block for more than 60 seconds */ + if (unlikely(client->blocked_time && now_t - client->blocked_time >= 60)) { + LOGNOTICE("Client id %"PRId64" fd %d blocked for >60 seconds, disconnecting", + client->id, client->fd); + invalidate_client(ckp, cdata, client); + goto out_true; + } + if (errno == EAGAIN || errno == EWOULDBLOCK || !ret) { + if (!client->blocked_time) + client->blocked_time = now_t; + return false; + } + LOGINFO("Client id %"PRId64" fd %d disconnected with write errno %d:%s", + client->id, client->fd, errno, strerror(errno)); + invalidate_client(ckp, cdata, client); + goto out_true; + } + sender_send->ofs += ret; + sender_send->len -= ret; + client->blocked_time = 0; + } +out_true: + client->sending = NULL; + return true; +} + +static void clear_sender_send(sender_send_t *sender_send, cdata_t *cdata) +{ + dec_instance_ref(cdata, sender_send->client); + free(sender_send->buf); + free(sender_send); +} + +/* Use a thread to send queued messages, appending them to the sends list and + * iterating over all of them, attempting to send them all non-blocking to + * only send to those clients ready to 
receive data. */ +static void *sender(void *arg) +{ + cdata_t *cdata = (cdata_t *)arg; + sender_send_t *sends = NULL; + ckpool_t *ckp = cdata->ckp; + + rename_proc("csender"); + + while (42) { + int64_t sends_queued = 0, sends_size = 0; + sender_send_t *sending, *tmp; + + /* Check all sends to see if they can be written out */ + DL_FOREACH_SAFE(sends, sending, tmp) { + if (send_sender_send(ckp, cdata, sending)) { + DL_DELETE(sends, sending); + clear_sender_send(sending, cdata); + } else { + sends_queued++; + sends_size += sizeof(sender_send_t) + sending->len + 1; + } + } + + mutex_lock(&cdata->sender_lock); + cdata->sends_delayed += sends_queued; + cdata->sends_queued = sends_queued; + cdata->sends_size = sends_size; + /* Poll every 10ms if there are no new sends. */ + if (!cdata->sender_sends) { + const ts_t polltime = {0, 10000000}; + ts_t timeout_ts; + + ts_realtime(&timeout_ts); + timeraddspec(&timeout_ts, &polltime); + cond_timedwait(&cdata->sender_cond, &cdata->sender_lock, &timeout_ts); + } + if (cdata->sender_sends) { + DL_CONCAT(sends, cdata->sender_sends); + cdata->sender_sends = NULL; + } + mutex_unlock(&cdata->sender_lock); + } + /* We shouldn't get here unless there's an error */ + return NULL; +} + +static int add_redirect(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) +{ + redirect_t *redirect; + bool found; + + ck_wlock(&cdata->lock); + HASH_FIND_STR(cdata->redirects, client->address_name, redirect); + if (!redirect) { + redirect = ckzalloc(sizeof(redirect_t)); + strcpy(redirect->address_name, client->address_name); + redirect->redirect_no = cdata->redirect++; + if (cdata->redirect >= ckp->redirecturls) + cdata->redirect = 0; + HASH_ADD_STR(cdata->redirects, address_name, redirect); + found = false; + } else + found = true; + ck_wunlock(&cdata->lock); + + LOGNOTICE("Redirecting client %"PRId64" from %s IP %s to redirecturl %d", + client->id, found ? 
"matching" : "new", client->address_name, redirect->redirect_no); + return redirect->redirect_no; +} + +static void redirect_client(ckpool_t *ckp, client_instance_t *client) +{ + sender_send_t *sender_send; + cdata_t *cdata = ckp->cdata; + json_t *val; + char *buf; + int num; + + /* Set the redirected boool to only try redirecting them once */ + client->redirected = true; + + num = add_redirect(ckp, cdata, client); + JSON_CPACK(val, "{sosss[ssi]}", "id", json_null(), "method", "client.reconnect", + "params", ckp->redirecturl[num], ckp->redirectport[num], 0); + buf = json_dumps(val, JSON_EOL | JSON_COMPACT); + json_decref(val); + + sender_send = ckzalloc(sizeof(sender_send_t)); + sender_send->client = client; + sender_send->buf = buf; + sender_send->len = strlen(buf); + inc_instance_ref(cdata, client); + + mutex_lock(&cdata->sender_lock); + cdata->sends_generated++; + DL_APPEND(cdata->sender_sends, sender_send); + pthread_cond_signal(&cdata->sender_cond); + mutex_unlock(&cdata->sender_lock); +} + +/* Look for accepted shares in redirector mode to know we can redirect this + * client to a protected server. 
*/
/* Check a json response destined for a redirector-mode client to see whether
 * it acknowledges one of the shares logged for that client. Returns true if
 * an accepted share is found, meaning the client can be redirected. */
static bool test_redirector_shares(cdata_t *cdata, client_instance_t *client, const char *buf)
{
	json_t *val = json_loads(buf, 0, NULL);
	share_t *share, *found = NULL;
	bool ret = false;
	int64_t id;

	if (!val) {
		/* Can happen when responding to invalid json from client.
		 * Note: a space was missing between the id and the buffer in
		 * this log format, fusing them into one token. */
		LOGINFO("Invalid json response to client %"PRId64" %s", client->id, buf);
		return ret;
	}
	if (!json_get_int64(&id, val, "id")) {
		LOGINFO("Failed to find response id");
		goto out;
	}

	/* Match the response id against the shares stored for this client */
	ck_rlock(&cdata->lock);
	DL_FOREACH(client->shares, share) {
		if (share->id == id) {
			LOGDEBUG("Found matching share %"PRId64" in trs for client %"PRId64,
				 id, client->id);
			found = share;
			break;
		}
	}
	ck_runlock(&cdata->lock);

	if (found) {
		bool result = false;

		/* Only an accepted (result true, no error) share counts */
		if (!json_get_bool(&result, val, "result")) {
			LOGINFO("Failed to find result in trs share");
			goto out;
		}
		if (!json_is_null(json_object_get(val, "error"))) {
			LOGINFO("Got error for trs share");
			goto out;
		}
		if (!result) {
			LOGDEBUG("Rejected trs share");
			goto out;
		}
		LOGNOTICE("Found accepted share for client %"PRId64" - redirecting",
			  client->id);
		ret = true;

		/* Clear the list now since we don't need it any more */
		ck_wlock(&cdata->lock);
		DL_FOREACH_SAFE(client->shares, share, found) {
			DL_DELETE(client->shares, share);
			dealloc(share);
		}
		ck_wunlock(&cdata->lock);
	}
out:
	json_decref(val);
	return ret;
}

/* Send a client by id a heap allocated buffer, allowing this function to
 * free the ram.
*/ +static void send_client(ckpool_t *ckp, cdata_t *cdata, const int64_t id, char *buf) +{ + sender_send_t *sender_send; + client_instance_t *client; + bool redirect = false; + int64_t pass_id; + int len; + + if (unlikely(!buf)) { + LOGWARNING("Connector send_client sent a null buffer"); + return; + } + len = strlen(buf); + if (unlikely(!len)) { + LOGWARNING("Connector send_client sent a zero length buffer"); + free(buf); + return; + } + + if (unlikely(ckp->node && !id)) { + LOGDEBUG("Message for node: %s", buf); + send_proc(ckp->stratifier, buf); + free(buf); + return; + } + + /* Grab a reference to this client until the sender_send has + * completed processing. Is this a passthrough subclient ? */ + if ((pass_id = subclient(id))) { + int64_t client_id = id & 0xffffffffll; + + /* Make sure the passthrough exists for passthrough subclients */ + client = ref_client_by_id(cdata, pass_id); + if (unlikely(!client)) { + LOGINFO("Connector failed to find passthrough id %"PRId64" of client id %"PRId64" to send to", + pass_id, client_id); + /* Now see if the subclient exists */ + client = ref_client_by_id(cdata, client_id); + if (client) { + invalidate_client(ckp, cdata, client); + dec_instance_ref(cdata, client); + } else + stratifier_drop_id(ckp, id); + free(buf); + return; + } + } else { + client = ref_client_by_id(cdata, id); + if (unlikely(!client)) { + LOGINFO("Connector failed to find client id %"PRId64" to send to", id); + stratifier_drop_id(ckp, id); + free(buf); + return; + } + if (ckp->redirector && !client->redirected && client->authorised) { + /* If clients match the IP of clients that have already + * been whitelisted as finding valid shares then + * redirect them immediately. 
*/ + if (redirect_matches(cdata, client)) + redirect = true; + else + redirect = test_redirector_shares(cdata, client, buf); + } + } + + sender_send = ckzalloc(sizeof(sender_send_t)); + sender_send->client = client; + sender_send->buf = buf; + sender_send->len = len; + + mutex_lock(&cdata->sender_lock); + cdata->sends_generated++; + DL_APPEND(cdata->sender_sends, sender_send); + pthread_cond_signal(&cdata->sender_cond); + mutex_unlock(&cdata->sender_lock); + + /* Redirect after sending response to shares and authorise */ + if (unlikely(redirect)) + redirect_client(ckp, client); +} + +static void send_client_json(ckpool_t *ckp, cdata_t *cdata, int64_t client_id, json_t *json_msg) +{ + client_instance_t *client; + char *msg; + + if (ckp->node && (client = ref_client_by_id(cdata, client_id))) { + json_t *val = json_deep_copy(json_msg); + + json_object_set_new_nocheck(val, "client_id", json_integer(client_id)); + json_object_set_new_nocheck(val, "address", json_string(client->address_name)); + json_object_set_new_nocheck(val, "server", json_integer(client->server)); + dec_instance_ref(cdata, client); + stratifier_add_recv(ckp, val); + } + if (ckp->passthrough && client_id) + json_object_del(json_msg, "node.method"); + + msg = json_dumps(json_msg, JSON_EOL | JSON_COMPACT); + send_client(ckp, cdata, client_id, msg); + json_decref(json_msg); +} + +/* When testing if a client exists, passthrough clients don't exist when their + * parent no longer exists. 
*/ +static bool client_exists(cdata_t *cdata, int64_t id) +{ + int64_t parent_id = subclient(id); + client_instance_t *client; + + if (parent_id) + id = parent_id; + + ck_rlock(&cdata->lock); + HASH_FIND_I64(cdata->clients, &id, client); + ck_runlock(&cdata->lock); + + return !!client; +} + +static void passthrough_client(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) +{ + json_t *val; + + LOGINFO("Connector adding passthrough client %"PRId64, client->id); + client->passthrough = true; + JSON_CPACK(val, "{sb}", "result", true); + send_client_json(ckp, cdata, client->id, val); + if (!ckp->rmem_warn) + set_recvbufsize(ckp, client->fd, 1048576); + if (!ckp->wmem_warn) + client->sendbufsize = set_sendbufsize(ckp, client->fd, 1048576); +} + +static bool connect_upstream(ckpool_t *ckp, connsock_t *cs) +{ + json_t *req, *val = NULL, *res_val, *err_val; + bool res, ret = false; + float timeout = 10; + + cksem_wait(&cs->sem); + cs->fd = connect_socket(cs->url, cs->port); + if (cs->fd < 0) { + LOGWARNING("Failed to connect to upstream server %s:%s", cs->url, cs->port); + goto out; + } + keep_sockalive(cs->fd); + + /* We want large send buffers for upstreaming messages */ + if (!ckp->rmem_warn) + set_recvbufsize(ckp, cs->fd, 2097152); + if (!ckp->wmem_warn) + cs->sendbufsiz = set_sendbufsize(ckp, cs->fd, 2097152); + + JSON_CPACK(req, "{ss,s[s]}", + "method", "mining.remote", + "params", PACKAGE"/"VERSION); + res = send_json_msg(cs, req); + json_decref(req); + if (!res) { + LOGWARNING("Failed to send message in connect_upstream"); + goto out; + } + if (read_socket_line(cs, &timeout) < 1) { + LOGWARNING("Failed to receive line in connect_upstream"); + goto out; + } + val = json_msg_result(cs->buf, &res_val, &err_val); + if (!val || !res_val) { + LOGWARNING("Failed to get a json result in connect_upstream, got: %s", + cs->buf); + goto out; + } + ret = json_is_true(res_val); + if (!ret) { + LOGWARNING("Denied upstream trusted connection"); + goto out; + } + 
LOGWARNING("Connected to upstream server %s:%s as trusted remote", + cs->url, cs->port); + ret = true; +out: + cksem_post(&cs->sem); + + return ret; +} + +static void usend_process(ckpool_t *ckp, char *buf) +{ + cdata_t *cdata = ckp->cdata; + connsock_t *cs = &cdata->upstream_cs; + int len, sent; + + if (unlikely(!buf || !strlen(buf))) { + LOGERR("Send empty message to usend_process"); + goto out; + } + LOGDEBUG("Sending upstream msg: %s", buf); + len = strlen(buf); + while (42) { + sent = write_socket(cs->fd, buf, len); + if (sent == len) + break; + if (cs->fd > 0) { + LOGWARNING("Upstream pool failed, attempting reconnect while caching messages"); + Close(cs->fd); + } + do + sleep(5); + while (!connect_upstream(ckp, cs)); + } +out: + free(buf); +} + +static void ping_upstream(cdata_t *cdata) +{ + char *buf; + + ASPRINTF(&buf, "{\"method\":\"ping\"}\n"); + ckmsgq_add(cdata->upstream_sends, buf); +} + +static void *urecv_process(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + cdata_t *cdata = ckp->cdata; + connsock_t *cs = &cdata->upstream_cs; + bool alive = true; + + rename_proc("ureceiver"); + + pthread_detach(pthread_self()); + + while (42) { + const char *method; + float timeout = 5; + json_t *val; + int ret; + + cksem_wait(&cs->sem); + ret = read_socket_line(cs, &timeout); + if (ret < 1) { + ping_upstream(cdata); + if (likely(!ret)) { + LOGDEBUG("No message from upstream pool"); + } else { + LOGNOTICE("Failed to read from upstream pool"); + alive = false; + } + goto nomsg; + } + alive = true; + val = json_loads(cs->buf, 0, NULL); + if (unlikely(!val)) { + LOGWARNING("Received non-json msg from upstream pool %s", + cs->buf); + goto nomsg; + } + method = json_string_value(json_object_get(val, "method")); + if (unlikely(!method)) { + LOGWARNING("Failed to find method from upstream pool json %s", + cs->buf); + json_decref(val); + goto decref; + } + if (!safecmp(method, stratum_msgs[SM_TRANSACTIONS])) + parse_upstream_txns(ckp, val); + else if (!safecmp(method, 
stratum_msgs[SM_AUTHRESULT])) + parse_upstream_auth(ckp, val); + else if (!safecmp(method, stratum_msgs[SM_WORKINFO])) + parse_upstream_workinfo(ckp, val); + else if (!safecmp(method, stratum_msgs[SM_BLOCK])) + parse_upstream_block(ckp, val); + else if (!safecmp(method, stratum_msgs[SM_REQTXNS])) + parse_upstream_reqtxns(ckp, val); + else if (!safecmp(method, "pong")) + LOGDEBUG("Received upstream pong"); + else + LOGWARNING("Unrecognised upstream method %s", method); +decref: + json_decref(val); +nomsg: + cksem_post(&cs->sem); + + if (!alive) + sleep(5); + } + return NULL; +} + +static bool setup_upstream(ckpool_t *ckp, cdata_t *cdata) +{ + connsock_t *cs = &cdata->upstream_cs; + bool ret = false; + pthread_t pth; + + cs->ckp = ckp; + if (!ckp->upstream) { + LOGEMERG("No upstream server set in remote trusted server mode"); + goto out; + } + if (!extract_sockaddr(ckp->upstream, &cs->url, &cs->port)) { + LOGEMERG("Failed to extract upstream address from %s", ckp->upstream); + goto out; + } + + cksem_init(&cs->sem); + cksem_post(&cs->sem); + + while (!connect_upstream(ckp, cs)) + cksleep_ms(5000); + + create_pthread(&pth, urecv_process, ckp); + cdata->upstream_sends = create_ckmsgq(ckp, "usender", &usend_process); + ret = true; +out: + return ret; +} + +static void client_message_processor(ckpool_t *ckp, json_t *json_msg) +{ + cdata_t *cdata = ckp->cdata; + client_instance_t *client; + int64_t client_id; + + /* Extract the client id from the json message and remove its entry */ + client_id = json_integer_value(json_object_get(json_msg, "client_id")); + json_object_del(json_msg, "client_id"); + /* Put client_id back in for a passthrough subclient, passing its + * upstream client_id instead of the passthrough's. 
*/ + if (subclient(client_id)) + json_object_set_new_nocheck(json_msg, "client_id", json_integer(client_id & 0xffffffffll)); + + /* Flag redirector clients once they've been authorised */ + if (ckp->redirector && (client = ref_client_by_id(cdata, client_id))) { + if (!client->redirected && !client->authorised) { + json_t *method_val = json_object_get(json_msg, "node.method"); + const char *method = json_string_value(method_val); + + if (!safecmp(method, stratum_msgs[SM_AUTHRESULT])) + client->authorised = true; + } + dec_instance_ref(cdata, client); + } + send_client_json(ckp, cdata, client_id, json_msg); +} + +void connector_add_message(ckpool_t *ckp, json_t *val) +{ + cdata_t *cdata = ckp->cdata; + + ckmsgq_add(cdata->cmpq, val); +} + +/* Send the passthrough the terminate node.method */ +static void drop_passthrough_client(ckpool_t *ckp, cdata_t *cdata, const int64_t id) +{ + int64_t client_id; + char *msg; + + LOGINFO("Asked to drop passthrough client %"PRId64", forwarding to passthrough", id); + client_id = id & 0xffffffffll; + /* We have a direct connection to the passthrough's connector so we + * can send it any regular commands. 
*/ + ASPRINTF(&msg, "dropclient=%"PRId64"\n", client_id); + send_client(ckp, cdata, id, msg); +} + +char *connector_stats(void *data, const int runtime) +{ + json_t *val = json_object(), *subval; + client_instance_t *client; + int objects, generated; + cdata_t *cdata = data; + sender_send_t *send; + int64_t memsize; + char *buf; + + /* If called in passthrough mode we log stats instead of the stratifier */ + if (runtime) + json_set_int(val, "runtime", runtime); + + ck_rlock(&cdata->lock); + objects = HASH_COUNT(cdata->clients); + memsize = SAFE_HASH_OVERHEAD(cdata->clients) + sizeof(client_instance_t) * objects; + generated = cdata->clients_generated; + ck_runlock(&cdata->lock); + + JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "clients", subval); + + ck_rlock(&cdata->lock); + DL_COUNT2(cdata->dead_clients, client, objects, dead_next); + generated = cdata->dead_generated; + ck_runlock(&cdata->lock); + + memsize = objects * sizeof(client_instance_t); + JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "dead", subval); + + objects = 0; + memsize = 0; + + mutex_lock(&cdata->sender_lock); + DL_FOREACH(cdata->sender_sends, send) { + objects++; + memsize += sizeof(sender_send_t) + send->len + 1; + } + JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", cdata->sends_generated); + json_set_object(val, "sends", subval); + + JSON_CPACK(subval, "{si,si,si}", "count", cdata->sends_queued, "memory", cdata->sends_size, "generated", cdata->sends_delayed); + mutex_unlock(&cdata->sender_lock); + + json_set_object(val, "delays", subval); + + buf = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + if (runtime) + LOGNOTICE("Passthrough:%s", buf); + else + LOGNOTICE("Connector stats: %s", buf); + return buf; +} + +void connector_send_fd(ckpool_t *ckp, const int fdno, const int sockd) +{ + 
cdata_t *cdata = ckp->cdata; + + if (fdno > -1 && fdno < ckp->serverurls) + send_fd(cdata->serverfd[fdno], sockd); + else + LOGWARNING("Connector asked to send invalid fd %d", fdno); +} + +static void connector_loop(proc_instance_t *pi, cdata_t *cdata) +{ + unix_msg_t *umsg = NULL; + ckpool_t *ckp = pi->ckp; + time_t last_stats; + int64_t client_id; + int ret = 0; + char *buf; + + last_stats = cdata->start_time; + +retry: + if (ckp->passthrough) { + time_t diff = time(NULL); + + if (diff - last_stats >= 60) { + last_stats = diff; + diff -= cdata->start_time; + buf = connector_stats(cdata, diff); + dealloc(buf); + } + } + + if (umsg) { + Close(umsg->sockd); + free(umsg->buf); + dealloc(umsg); + } + + do { + umsg = get_unix_msg(pi); + } while (!umsg); + + buf = umsg->buf; + LOGDEBUG("Connector received message: %s", buf); + /* The bulk of the messages will be json messages to send to clients + * so look for them first. */ + if (likely(buf[0] == '{')) { + json_t *val = json_loads(buf, JSON_DISABLE_EOF_CHECK, NULL); + + ckmsgq_add(cdata->cmpq, val); + } else if (cmdmatch(buf, "dropclient")) { + client_instance_t *client; + + ret = sscanf(buf, "dropclient=%"PRId64, &client_id); + if (ret < 0) { + LOGDEBUG("Connector failed to parse dropclient command: %s", buf); + goto retry; + } + /* A passthrough client */ + if (subclient(client_id)) { + drop_passthrough_client(ckp, cdata, client_id); + goto retry; + } + client = ref_client_by_id(cdata, client_id); + if (unlikely(!client)) { + LOGINFO("Connector failed to find client id %"PRId64" to drop", client_id); + goto retry; + } + ret = invalidate_client(ckp, cdata, client); + dec_instance_ref(cdata, client); + if (ret >= 0) + LOGINFO("Connector dropped client id: %"PRId64, client_id); + } else if (cmdmatch(buf, "testclient")) { + ret = sscanf(buf, "testclient=%"PRId64, &client_id); + if (unlikely(ret < 0)) { + LOGDEBUG("Connector failed to parse testclient command: %s", buf); + goto retry; + } + if (client_exists(cdata, 
client_id)) + goto retry; + LOGINFO("Connector detected non-existent client id: %"PRId64, client_id); + stratifier_drop_id(ckp, client_id); + } else if (cmdmatch(buf, "ping")) { + LOGDEBUG("Connector received ping request"); + send_unix_msg(umsg->sockd, "pong"); + } else if (cmdmatch(buf, "accept")) { + LOGDEBUG("Connector received accept signal"); + cdata->accept = true; + } else if (cmdmatch(buf, "reject")) { + LOGDEBUG("Connector received reject signal"); + cdata->accept = false; + if (ckp->passthrough) + drop_all_clients(cdata); + } else if (cmdmatch(buf, "stats")) { + char *msg; + + LOGDEBUG("Connector received stats request"); + msg = connector_stats(cdata, 0); + send_unix_msg(umsg->sockd, msg); + } else if (cmdmatch(buf, "loglevel")) { + sscanf(buf, "loglevel=%d", &ckp->loglevel); + } else if (cmdmatch(buf, "passthrough")) { + client_instance_t *client; + + ret = sscanf(buf, "passthrough=%"PRId64, &client_id); + if (ret < 0) { + LOGDEBUG("Connector failed to parse passthrough command: %s", buf); + goto retry; + } + client = ref_client_by_id(cdata, client_id); + if (unlikely(!client)) { + LOGINFO("Connector failed to find client id %"PRId64" to pass through", client_id); + goto retry; + } + passthrough_client(ckp, cdata, client); + dec_instance_ref(cdata, client); + } else if (cmdmatch(buf, "getxfd")) { + int fdno = -1; + + sscanf(buf, "getxfd%d", &fdno); + if (fdno > -1 && fdno < ckp->serverurls) + send_fd(cdata->serverfd[fdno], umsg->sockd); + } else + LOGWARNING("Unhandled connector message: %s", buf); + goto retry; +} + +void *connector(void *arg) +{ + proc_instance_t *pi = (proc_instance_t *)arg; + cdata_t *cdata = ckzalloc(sizeof(cdata_t)); + char newurl[INET6_ADDRSTRLEN], newport[8]; + int threads, sockd, i, tries = 0, ret; + ckpool_t *ckp = pi->ckp; + const int on = 1; + + rename_proc(pi->processname); + LOGWARNING("%s connector starting", ckp->name); + ckp->cdata = cdata; + cdata->ckp = ckp; + + if (!ckp->serverurls) { + /* No serverurls have been 
specified. Bind to all interfaces + * on default sockets. */ + struct sockaddr_in serv_addr; + + cdata->serverfd = ckalloc(sizeof(int *)); + + sockd = socket(AF_INET, SOCK_STREAM, 0); + if (sockd < 0) { + LOGERR("Connector failed to open socket"); + goto out; + } + setsockopt(sockd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); + memset(&serv_addr, 0, sizeof(serv_addr)); + serv_addr.sin_family = AF_INET; + serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); + serv_addr.sin_port = htons(ckp->proxy ? 3334 : 3333); + do { + ret = bind(sockd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)); + + if (!ret) + break; + LOGWARNING("Connector failed to bind to socket, retrying in 5s"); + sleep(5); + } while (++tries < 25); + if (ret < 0) { + LOGERR("Connector failed to bind to socket for 2 minutes"); + Close(sockd); + goto out; + } + /* Set listen backlog to larger than SOMAXCONN in case the + * system configuration supports it */ + if (listen(sockd, 8192) < 0) { + LOGERR("Connector failed to listen on socket"); + Close(sockd); + goto out; + } + cdata->serverfd[0] = sockd; + url_from_socket(sockd, newurl, newport); + ASPRINTF(&ckp->serverurl[0], "%s:%s", newurl, newport); + ckp->serverurls = 1; + } else { + cdata->serverfd = ckalloc(sizeof(int *) * ckp->serverurls); + + for (i = 0; i < ckp->serverurls; i++) { + char oldurl[INET6_ADDRSTRLEN], oldport[8]; + char *serverurl = ckp->serverurl[i]; + int port; + + if (!url_from_serverurl(serverurl, newurl, newport)) { + LOGWARNING("Failed to extract resolved url from %s", serverurl); + goto out; + } + port = atoi(newport); + /* All high port servers are treated as highdiff ports */ + if (port > 4000) { + LOGNOTICE("Highdiff server %s", serverurl); + ckp->server_highdiff[i] = true; + } + sockd = ckp->oldconnfd[i]; + if (url_from_socket(sockd, oldurl, oldport)) { + if (strcmp(newurl, oldurl) || strcmp(newport, oldport)) { + LOGWARNING("Handed over socket url %s:%s does not match config %s:%s, creating new socket", + oldurl, oldport, 
newurl, newport); + Close(sockd); + } + } + + do { + if (sockd > 0) + break; + sockd = bind_socket(newurl, newport); + if (sockd > 0) + break; + LOGWARNING("Connector failed to bind to socket, retrying in 5s"); + sleep(5); + } while (++tries < 25); + + if (sockd < 0) { + LOGERR("Connector failed to bind to socket for 2 minutes"); + goto out; + } + if (listen(sockd, 8192) < 0) { + LOGERR("Connector failed to listen on socket"); + Close(sockd); + goto out; + } + cdata->serverfd[i] = sockd; + } + } + + if (tries) + LOGWARNING("Connector successfully bound to socket"); + + cdata->cmpq = create_ckmsgq(ckp, "cmpq", &client_message_processor); + + if (ckp->remote && !setup_upstream(ckp, cdata)) + goto out; + + cklock_init(&cdata->lock); + cdata->pi = pi; + cdata->nfds = 0; + /* Set the client id to the highest serverurl count to distinguish + * them from the server fds in epoll. */ + cdata->client_ids = ckp->serverurls; + mutex_init(&cdata->sender_lock); + cond_init(&cdata->sender_cond); + create_pthread(&cdata->pth_sender, sender, cdata); + threads = sysconf(_SC_NPROCESSORS_ONLN) / 2 ? 
: 1; + cdata->cevents = create_ckmsgqs(ckp, "cevent", &client_event_processor, threads); + create_pthread(&cdata->pth_receiver, receiver, cdata); + cdata->start_time = time(NULL); + + ckp->connector_ready = true; + LOGWARNING("%s connector ready", ckp->name); + + connector_loop(pi, cdata); +out: + /* We should never get here unless there's a fatal error */ + LOGEMERG("Connector failure, shutting down"); + exit(1); + return NULL; +} diff --git a/solo-ckpool-source/src/connector.h b/solo-ckpool-source/src/connector.h new file mode 100644 index 0000000..be945ef --- /dev/null +++ b/solo-ckpool-source/src/connector.h @@ -0,0 +1,20 @@ +/* + * Copyright 2014-2016 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef CONNECTOR_H +#define CONNECTOR_H + +int64_t connector_newclientid(ckpool_t *ckp); +void connector_upstream_msg(ckpool_t *ckp, char *msg); +void connector_add_message(ckpool_t *ckp, json_t *val); +char *connector_stats(void *data, const int runtime); +void connector_send_fd(ckpool_t *ckp, const int fdno, const int sockd); +void *connector(void *arg); + +#endif /* CONNECTOR_H */ diff --git a/solo-ckpool-source/src/generator.c b/solo-ckpool-source/src/generator.c new file mode 100644 index 0000000..22e2e08 --- /dev/null +++ b/solo-ckpool-source/src/generator.c @@ -0,0 +1,3422 @@ +/* + * Copyright 2014-2017,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include +#include +#include + +#include "ckpool.h" +#include "libckpool.h" +#include "generator.h" +#include "stratifier.h" +#include "bitcoin.h" +#include "uthash.h" +#include "utlist.h" + +struct notify_instance { + /* Hash table data */ + UT_hash_handle hh; + int64_t id64; + + char prevhash[68]; + json_t *jobid; + char *coinbase1; + char *coinbase2; + int coinb1len; + int merkles; + char merklehash[16][68]; + char nbit[12]; + char ntime[12]; + char bbversion[12]; + bool clean; + + time_t notify_time; +}; + +typedef struct notify_instance notify_instance_t; + +typedef struct proxy_instance proxy_instance_t; + +struct share_msg { + UT_hash_handle hh; + int64_t id64; // Our own id for submitting upstream + + int64_t client_id; + time_t submit_time; + double diff; +}; + +typedef struct share_msg share_msg_t; + +struct stratum_msg { + struct stratum_msg *next; + struct stratum_msg *prev; + + json_t *json_msg; + int64_t client_id; +}; + +typedef struct stratum_msg stratum_msg_t; + +struct pass_msg { + proxy_instance_t *proxy; + connsock_t *cs; + char *msg; +}; + +typedef struct pass_msg pass_msg_t; +typedef struct cs_msg cs_msg_t; + +/* Statuses of various proxy states - connect, subscribe and auth */ +enum proxy_stat { + STATUS_INIT = 0, + STATUS_SUCCESS, + STATUS_FAIL +}; + +static const char *proxy_status[] = { + "Initial", + "Success", + "Failed" +}; + +/* Per proxied pool instance data */ +struct proxy_instance { + UT_hash_handle hh; /* Proxy list */ + UT_hash_handle sh; /* Subproxy list */ + proxy_instance_t *next; /* For dead proxy list */ + proxy_instance_t *prev; /* For dead proxy list */ + + ckpool_t *ckp; + connsock_t cs; + bool passthrough; + bool node; + int id; /* Proxy server id*/ + int subid; /* Subproxy id */ + int userid; /* User id if this proxy is bound to a user */ + + char *baseurl; + char *url; + char *auth; + char *pass; + + char *enonce1; + char *enonce1bin; + int nonce1len; + int nonce2len; 
+ + tv_t last_message; + + double diff; + double diff_accepted; + double diff_rejected; + double total_accepted; /* Used only by parent proxy structures */ + double total_rejected; /* "" */ + tv_t last_share; + + /* Diff shares per second for 1/5/60... minute rolling averages */ + double dsps1; + double dsps5; + double dsps60; + double dsps360; + double dsps1440; + tv_t last_decay; + + /* Total diff shares per second for all subproxies */ + double tdsps1; /* Used only by parent proxy structures */ + double tdsps5; /* "" */ + double tdsps60; /* "" */ + double tdsps360; /* "" */ + double tdsps1440; /* "" */ + tv_t total_last_decay; + + bool no_params; /* Doesn't want any parameters on subscribe */ + + bool global; /* Part of the global list of proxies */ + bool disabled; /* Subproxy no longer to be used */ + bool reconnect; /* We need to drop and reconnect */ + bool reconnecting; /* Testing of parent in progress */ + int64_t recruit; /* No of recruiting requests in progress */ + bool alive; + bool authorised; + + /* Which of STATUS_* states are these in */ + enum proxy_stat connect_status; + enum proxy_stat subscribe_status; + enum proxy_stat auth_status; + + /* Back off from retrying if we fail one of the above */ + int backoff; + + /* Are we in the middle of a blocked write of this message? 
*/ + cs_msg_t *sending; + + pthread_t pth_precv; + + ckmsgq_t *passsends; // passthrough sends + + char_entry_t *recvd_lines; /* Linked list of unprocessed messages */ + + int epfd; /* Epoll fd used by the parent proxy */ + + mutex_t proxy_lock; /* Lock protecting hashlist of proxies */ + proxy_instance_t *parent; /* Parent proxy of subproxies */ + proxy_instance_t *subproxies; /* Hashlist of subproxies of this proxy */ + int64_t clients_per_proxy; /* Max number of clients of this proxy */ + int subproxy_count; /* Number of subproxies */ +}; + +/* Private data for the generator */ +struct generator_data { + ckpool_t *ckp; + mutex_t lock; /* Lock protecting linked lists */ + proxy_instance_t *proxies; /* Hash list of all proxies */ + proxy_instance_t *dead_proxies; /* Disabled proxies */ + int proxies_generated; + int subproxies_generated; + + int64_t proxy_notify_id; // Globally increasing notify id + pthread_t pth_uprecv; // User proxy receive thread + pthread_t pth_psend; // Combined proxy send thread + + mutex_t psend_lock; // Lock associated with conditional below + pthread_cond_t psend_cond; + + stratum_msg_t *psends; + int psends_generated; + + mutex_t notify_lock; + notify_instance_t *notify_instances; + + mutex_t share_lock; + share_msg_t *shares; + int64_t share_id; + + server_instance_t *current_si; // Current server instance + + proxy_instance_t *current_proxy; +}; + +typedef struct generator_data gdata_t; + +/* Use a temporary fd when testing server_alive to avoid races on cs->fd */ +static bool server_alive(ckpool_t *ckp, server_instance_t *si, bool pinging) +{ + char *userpass = NULL; + bool ret = false; + connsock_t *cs; + gbtbase_t gbt; + int fd; + + if (si->alive) + return true; + cs = &si->cs; + if (!extract_sockaddr(si->url, &cs->url, &cs->port)) { + LOGWARNING("Failed to extract address from %s", si->url); + return ret; + } + userpass = strdup(si->auth); + realloc_strcat(&userpass, ":"); + realloc_strcat(&userpass, si->pass); + 
dealloc(cs->auth); + cs->auth = http_base64(userpass); + if (!cs->auth) { + LOGWARNING("Failed to create base64 auth from %s", userpass); + dealloc(userpass); + return ret; + } + dealloc(userpass); + + fd = connect_socket(cs->url, cs->port); + if (fd < 0) { + if (!pinging) + LOGWARNING("Failed to connect socket to %s:%s !", cs->url, cs->port); + return ret; + } + + /* Test we can connect, authorise and get a block template */ + if (!gen_gbtbase(cs, &gbt)) { + if (!pinging) { + LOGINFO("Failed to get test block template from %s:%s!", + cs->url, cs->port); + } + goto out; + } + clear_gbtbase(&gbt); + if (unlikely(ckp->btcsolo && !ckp->btcaddress)) { + /* If no btcaddress is specified in solobtc mode, choose one of + * the donation addresses from mainnet, testnet, or regtest for + * coinbase validation later on, although it will not be used + * for mining. */ + if (validate_address(cs, ckp->donaddress, &ckp->script, &ckp->segwit)) + ckp->btcaddress = ckp->donaddress; + else if (validate_address(cs, ckp->tndonaddress, &ckp->script, &ckp->segwit)) + ckp->btcaddress = ckp->tndonaddress; + else if (validate_address(cs, ckp->rtdonaddress, &ckp->script, &ckp->segwit)) + ckp->btcaddress = ckp->rtdonaddress; + } + + if (!ckp->node && !validate_address(cs, ckp->btcaddress, &ckp->script, &ckp->segwit)) { + LOGWARNING("Invalid btcaddress: %s !", ckp->btcaddress); + goto out; + } + si->alive = cs->alive = ret = true; + LOGNOTICE("Server alive: %s:%s", cs->url, cs->port); +out: + /* Close the file handle */ + close(fd); + return ret; +} + +/* Find the highest priority server alive and return it */ +static server_instance_t *live_server(ckpool_t *ckp, gdata_t *gdata) +{ + server_instance_t *alive = NULL; + connsock_t *cs; + int i; + + LOGDEBUG("Attempting to connect to bitcoind"); +retry: + /* First find a server that is already flagged alive if possible + * without blocking on server_alive() */ + for (i = 0; i < ckp->btcds; i++) { + server_instance_t *si = ckp->servers[i]; + cs = 
&si->cs; + + if (si->alive) { + alive = si; + goto living; + } + } + + /* No servers flagged alive, try to connect to them blocking */ + for (i = 0; i < ckp->btcds; i++) { + server_instance_t *si = ckp->servers[i]; + + if (server_alive(ckp, si, false)) { + alive = si; + goto living; + } + } + LOGWARNING("CRITICAL: No bitcoinds active!"); + sleep(5); + goto retry; +living: + gdata->current_si = alive; + cs = &alive->cs; + LOGINFO("Connected to live server %s:%s", cs->url, cs->port); + send_proc(ckp->connector, alive ? "accept" : "reject"); + return alive; +} + +static void kill_server(server_instance_t *si) +{ + connsock_t *cs; + + if (!si) // This shouldn't happen + return; + + LOGNOTICE("Killing server"); + cs = &si->cs; + Close(cs->fd); + empty_buffer(cs); + dealloc(cs->url); + dealloc(cs->port); + dealloc(cs->auth); +} + +static void clear_unix_msg(unix_msg_t **umsg) +{ + if (*umsg) { + Close((*umsg)->sockd); + free((*umsg)->buf); + free(*umsg); + *umsg = NULL; + } +} + +bool generator_submitblock(ckpool_t *ckp, const char *buf) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + bool warn = false; + connsock_t *cs; + + while (unlikely(!(si = gdata->current_si))) { + if (!warn) + LOGWARNING("No live current server in generator_blocksubmit! 
Resubmitting indefinitely!"); + warn = true; + cksleep_ms(10); + } + cs = &si->cs; + LOGNOTICE("Submitting block data!"); + return submit_block(cs, buf); +} + +void generator_preciousblock(ckpool_t *ckp, const char *hash) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + connsock_t *cs; + + if (unlikely(!(si = gdata->current_si))) { + LOGWARNING("No live current server in generator_get_blockhash"); + return; + } + cs = &si->cs; + precious_block(cs, hash); +} + +bool generator_get_blockhash(ckpool_t *ckp, int height, char *hash) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + connsock_t *cs; + + if (unlikely(!(si = gdata->current_si))) { + LOGWARNING("No live current server in generator_get_blockhash"); + return false; + } + cs = &si->cs; + return get_blockhash(cs, height, hash); +} + +static void gen_loop(proc_instance_t *pi) +{ + server_instance_t *si = NULL, *old_si; + unix_msg_t *umsg = NULL; + ckpool_t *ckp = pi->ckp; + char *buf = NULL; + connsock_t *cs; + gbtbase_t gbt; + char hash[68]; + +reconnect: + clear_unix_msg(&umsg); + old_si = si; + si = live_server(ckp, ckp->gdata); + if (!si) + goto out; + if (unlikely(!ckp->generator_ready)) { + ckp->generator_ready = true; + LOGWARNING("%s generator ready", ckp->name); + } + + cs = &si->cs; + if (!old_si) + LOGWARNING("Connected to bitcoind: %s:%s", cs->url, cs->port); + else if (si != old_si) + LOGWARNING("Failed over to bitcoind: %s:%s", cs->url, cs->port); + +retry: + clear_unix_msg(&umsg); + + do { + umsg = get_unix_msg(pi); + } while (!umsg); + + if (unlikely(!si->alive)) { + LOGWARNING("%s:%s Bitcoind socket invalidated, will attempt failover", cs->url, cs->port); + goto reconnect; + } + + buf = umsg->buf; + LOGDEBUG("Generator received request: %s", buf); + if (cmdmatch(buf, "getbase")) { + if (!gen_gbtbase(cs, &gbt)) { + LOGWARNING("Failed to get block template from %s:%s", + cs->url, cs->port); + si->alive = cs->alive = false; + send_unix_msg(umsg->sockd, "Failed"); + goto 
reconnect; + } else { + char *s = json_dumps(gbt.json, JSON_NO_UTF8); + + send_unix_msg(umsg->sockd, s); + free(s); + clear_gbtbase(&gbt); + } + } else if (cmdmatch(buf, "getbest")) { + if (si->notify) + send_unix_msg(umsg->sockd, "notify"); + else if (!get_bestblockhash(cs, hash)) { + LOGINFO("No best block hash support from %s:%s", + cs->url, cs->port); + si->alive = cs->alive = false; + send_unix_msg(umsg->sockd, "failed"); + } else { + send_unix_msg(umsg->sockd, hash); + } + } else if (cmdmatch(buf, "getlast")) { + int height; + + if (si->notify) + send_unix_msg(umsg->sockd, "notify"); + else if ((height = get_blockcount(cs)) == -1) { + si->alive = cs->alive = false; + send_unix_msg(umsg->sockd, "failed"); + goto reconnect; + } else { + LOGDEBUG("Height: %d", height); + if (!get_blockhash(cs, height, hash)) { + si->alive = cs->alive = false; + send_unix_msg(umsg->sockd, "failed"); + goto reconnect; + } else { + send_unix_msg(umsg->sockd, hash); + LOGDEBUG("Hash: %s", hash); + } + } + } else if (cmdmatch(buf, "submitblock:")) { + char blockmsg[80]; + bool ret; + + LOGNOTICE("Submitting block data!"); + ret = submit_block(cs, buf + 12 + 64 + 1); + memset(buf + 12 + 64, 0, 1); + sprintf(blockmsg, "%sblock:%s", ret ? 
"" : "no", buf + 12); + send_proc(ckp->stratifier, blockmsg); + } else if (cmdmatch(buf, "reconnect")) { + goto reconnect; + } else if (cmdmatch(buf, "loglevel")) { + sscanf(buf, "loglevel=%d", &ckp->loglevel); + } else if (cmdmatch(buf, "ping")) { + LOGDEBUG("Generator received ping request"); + send_unix_msg(umsg->sockd, "pong"); + } + goto retry; + +out: + kill_server(si); +} + +static bool connect_proxy(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxy) +{ + if (cs->fd > 0) { + epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, cs->fd, NULL); + Close(cs->fd); + } + cs->fd = connect_socket(cs->url, cs->port); + if (cs->fd < 0) { + LOGINFO("Failed to connect socket to %s:%s in connect_proxy", + cs->url, cs->port); + return false; + } + keep_sockalive(cs->fd); + if (!ckp->passthrough) { + struct epoll_event event; + + event.events = EPOLLIN | EPOLLRDHUP; + event.data.ptr = proxy; + /* Add this connsock_t to the epoll list */ + if (unlikely(epoll_ctl(proxy->epfd, EPOLL_CTL_ADD, cs->fd, &event) == -1)) { + LOGERR("Failed to add fd %d to epfd %d to epoll_ctl in proxy_alive", + cs->fd, proxy->epfd); + return false; + } + } else { + /* We want large send/recv buffers on passthroughs */ + if (!ckp->rmem_warn) + cs->rcvbufsiz = set_recvbufsize(ckp, cs->fd, 1048576); + if (!ckp->wmem_warn) + cs->sendbufsiz = set_sendbufsize(ckp, cs->fd, 1048576); + } + return true; +} + +/* For some reason notify is buried at various different array depths so use + * a reentrant function to try and find it. 
*/ +static json_t *find_notify(json_t *val) +{ + int arr_size, i; + json_t *ret = NULL; + const char *entry; + + if (!json_is_array(val)) + return NULL; + arr_size = json_array_size(val); + entry = json_string_value(json_array_get(val, 0)); + if (cmdmatch(entry, "mining.notify")) + return val; + for (i = 0; i < arr_size; i++) { + json_t *arr_val; + + arr_val = json_array_get(val, i); + ret = find_notify(arr_val); + if (ret) + break; + } + return ret; +} + +/* Get stored line in the proxy linked list of messages if any exist or NULL */ +static char *cached_proxy_line(proxy_instance_t *proxi) +{ + char *buf = NULL; + + if (proxi->recvd_lines) { + char_entry_t *char_t = proxi->recvd_lines; + + DL_DELETE(proxi->recvd_lines, char_t); + buf = char_t->buf; + free(char_t); + } + return buf; +} + +/* Get next line in the proxy linked list of messages or a new line from the + * connsock if there are none. */ +static char *next_proxy_line(connsock_t *cs, proxy_instance_t *proxi) +{ + char *buf = cached_proxy_line(proxi); + float timeout = 10; + + if (!buf && read_socket_line(cs, &timeout) > 0) + buf = strdup(cs->buf); + return buf; +} + +/* For appending a line to the proxy recv list */ +static void append_proxy_line(proxy_instance_t *proxi, const char *buf) +{ + char_entry_t *char_t = ckalloc(sizeof(char_entry_t)); + char_t->buf = strdup(buf); + DL_APPEND(proxi->recvd_lines, char_t); +} + +/* Get a new line from the connsock and return a copy of it */ +static char *new_proxy_line(connsock_t *cs) +{ + float timeout = 10; + char *buf = NULL; + + if (read_socket_line(cs, &timeout) < 1) + goto out; + buf = strdup(cs->buf); +out: + return buf; +} + +static inline bool parent_proxy(const proxy_instance_t *proxy) +{ + return (proxy->parent == proxy); +} + +static void recruit_subproxies(proxy_instance_t *proxi, const int recruits); + +static bool parse_subscribe(connsock_t *cs, proxy_instance_t *proxi) +{ + json_t *val = NULL, *res_val, *notify_val, *tmp; + bool parsed, ret = 
false; + int retries = 0, size; + const char *string; + char *buf, *old; + +retry: + parsed = true; + if (!(buf = new_proxy_line(cs))) { + LOGNOTICE("Proxy %d:%d %s failed to receive line in parse_subscribe", + proxi->id, proxi->subid, proxi->url); + goto out; + } + LOGDEBUG("parse_subscribe received %s", buf); + /* Ignore err_val here stored in &tmp */ + val = json_msg_result(buf, &res_val, &tmp); + if (!val || !res_val) { + LOGINFO("Failed to get a json result in parse_subscribe, got: %s", buf); + parsed = false; + } + if (!json_is_array(res_val)) { + LOGINFO("Result in parse_subscribe not an array"); + parsed = false; + } + size = json_array_size(res_val); + if (size < 3) { + LOGINFO("Result in parse_subscribe array too small"); + parsed = false; + } + notify_val = find_notify(res_val); + if (!notify_val) { + LOGINFO("Failed to find notify in parse_subscribe"); + parsed = false; + } + if (!parsed) { + if (++retries < 3) { + /* We don't want this response so put it on the proxy + * recvd list to be parsed later */ + append_proxy_line(proxi, buf); + buf = NULL; + goto retry; + } + LOGNOTICE("Proxy %d:%d %s failed to parse subscribe response in parse_subscribe", + proxi->id, proxi->subid, proxi->url); + goto out; + } + + tmp = json_array_get(res_val, 1); + if (!tmp || !json_is_string(tmp)) { + LOGWARNING("Failed to parse enonce1 in parse_subscribe"); + goto out; + } + string = json_string_value(tmp); + old = proxi->enonce1; + proxi->enonce1 = strdup(string); + free(old); + proxi->nonce1len = strlen(proxi->enonce1) / 2; + if (proxi->nonce1len > 15) { + LOGWARNING("Nonce1 too long at %d", proxi->nonce1len); + goto out; + } + old = proxi->enonce1bin; + proxi->enonce1bin = ckalloc(proxi->nonce1len); + free(old); + hex2bin(proxi->enonce1bin, proxi->enonce1, proxi->nonce1len); + tmp = json_array_get(res_val, 2); + if (!tmp || !json_is_integer(tmp)) { + LOGWARNING("Failed to parse nonce2len in parse_subscribe"); + goto out; + } + size = json_integer_value(tmp); + if (size 
< 1 || size > 8) { + LOGWARNING("Invalid nonce2len %d in parse_subscribe", size); + goto out; + } + if (size < 3) { + if (!proxi->subid) { + LOGWARNING("Proxy %d %s Nonce2 length %d too small for fast miners", + proxi->id, proxi->url, size); + } else { + LOGNOTICE("Proxy %d:%d Nonce2 length %d too small for fast miners", + proxi->id, proxi->subid, size); + } + } + proxi->nonce2len = size; + proxi->clients_per_proxy = 1ll << ((size - 3) * 8); + + LOGNOTICE("Found notify for new proxy %d:%d with enonce %s nonce2len %d", proxi->id, + proxi->subid, proxi->enonce1, proxi->nonce2len); + ret = true; + +out: + if (val) + json_decref(val); + free(buf); + return ret; +} + +/* cs semaphore must be held */ +static bool subscribe_stratum(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxi) +{ + bool ret = false; + json_t *req; + +retry: + /* Attempt to connect with the client description g*/ + if (!proxi->no_params) { + JSON_CPACK(req, "{s:i,s:s,s:[s]}", + "id", 0, + "method", "mining.subscribe", + "params", PACKAGE"/"VERSION); + /* Then try without any parameters */ + } else { + JSON_CPACK(req, "{s:i,s:s,s:[]}", + "id", 0, + "method", "mining.subscribe", + "params"); + } + ret = send_json_msg(cs, req); + json_decref(req); + if (!ret) { + LOGNOTICE("Proxy %d:%d %s failed to send message in subscribe_stratum", + proxi->id, proxi->subid, proxi->url); + goto out; + } + ret = parse_subscribe(cs, proxi); + if (ret) + goto out; + + if (proxi->no_params) { + LOGNOTICE("Proxy %d:%d %s failed all subscription options in subscribe_stratum", + proxi->id, proxi->subid, proxi->url); + goto out; + } + LOGINFO("Proxy %d:%d %s failed connecting with parameters in subscribe_stratum, retrying without", + proxi->id, proxi->subid, proxi->url); + proxi->no_params = true; + ret = connect_proxy(ckp, cs, proxi); + if (!ret) { + LOGNOTICE("Proxy %d:%d %s failed to reconnect in subscribe_stratum", + proxi->id, proxi->subid, proxi->url); + goto out; + } + goto retry; + +out: + if (!ret && cs->fd > 0) 
{ + epoll_ctl(proxi->epfd, EPOLL_CTL_DEL, cs->fd, NULL); + Close(cs->fd); + } + return ret; +} + +/* cs semaphore must be held */ +static bool passthrough_stratum(connsock_t *cs, proxy_instance_t *proxi) +{ + json_t *req, *val = NULL, *res_val, *err_val; + bool res, ret = false; + float timeout = 10; + + JSON_CPACK(req, "{ss,s[s]}", + "method", "mining.passthrough", + "params", PACKAGE"/"VERSION); + res = send_json_msg(cs, req); + json_decref(req); + if (!res) { + LOGWARNING("Failed to send message in passthrough_stratum"); + goto out; + } + if (read_socket_line(cs, &timeout) < 1) { + LOGWARNING("Failed to receive line in passthrough_stratum"); + goto out; + } + /* Ignore err_val here since we should always get a result from an + * upstream passthrough server */ + val = json_msg_result(cs->buf, &res_val, &err_val); + if (!val || !res_val) { + LOGWARNING("Failed to get a json result in passthrough_stratum, got: %s", + cs->buf); + goto out; + } + ret = json_is_true(res_val); + if (!ret) { + LOGWARNING("Denied passthrough for stratum"); + goto out; + } + proxi->passthrough = true; +out: + if (val) + json_decref(val); + if (!ret) + Close(cs->fd); + return ret; +} + +/* cs semaphore must be held */ +static bool node_stratum(connsock_t *cs, proxy_instance_t *proxi) +{ + json_t *req, *val = NULL, *res_val, *err_val; + bool res, ret = false; + float timeout = 10; + + JSON_CPACK(req, "{ss,s[s]}", + "method", "mining.node", + "params", PACKAGE"/"VERSION); + + res = send_json_msg(cs, req); + json_decref(req); + if (!res) { + LOGWARNING("Failed to send message in node_stratum"); + goto out; + } + if (read_socket_line(cs, &timeout) < 1) { + LOGWARNING("Failed to receive line in node_stratum"); + goto out; + } + /* Ignore err_val here since we should always get a result from an + * upstream server */ + val = json_msg_result(cs->buf, &res_val, &err_val); + if (!val || !res_val) { + LOGWARNING("Failed to get a json result in node_stratum, got: %s", + cs->buf); + goto out; + } + 
ret = json_is_true(res_val); + if (!ret) { + LOGWARNING("Denied node setup for stratum"); + goto out; + } + proxi->node = true; +out: + if (val) + json_decref(val); + if (!ret) + Close(cs->fd); + return ret; +} + +static void send_notify(ckpool_t *ckp, proxy_instance_t *proxi, notify_instance_t *ni); + +static void reconnect_generator(ckpool_t *ckp) +{ + send_proc(ckp->generator, "reconnect"); +} + +struct genwork *generator_getbase(ckpool_t *ckp) +{ + gdata_t *gdata = ckp->gdata; + gbtbase_t *gbt = NULL; + server_instance_t *si; + connsock_t *cs; + + /* Use temporary variables to prevent deref while accessing */ + si = gdata->current_si; + if (unlikely(!si)) { + LOGWARNING("No live current server in generator_genbase"); + goto out; + } + cs = &si->cs; + gbt = ckzalloc(sizeof(gbtbase_t)); + if (unlikely(!gen_gbtbase(cs, gbt))) { + LOGWARNING("Failed to get block template from %s:%s", cs->url, cs->port); + si->alive = cs->alive = false; + reconnect_generator(ckp); + dealloc(gbt); + } +out: + return gbt; +} + +int generator_getbest(ckpool_t *ckp, char *hash) +{ + gdata_t *gdata = ckp->gdata; + int ret = GETBEST_FAILED; + server_instance_t *si; + connsock_t *cs; + + si = gdata->current_si; + if (unlikely(!si)) { + LOGWARNING("No live current server in generator_getbest"); + goto out; + } + if (si->notify) { + ret = GETBEST_NOTIFY; + goto out; + } + cs = &si->cs; + if (unlikely(!get_bestblockhash(cs, hash))) { + LOGWARNING("Failed to get best block hash from %s:%s", cs->url, cs->port); + goto out; + } + ret = GETBEST_SUCCESS; +out: + return ret; +} + +bool generator_checkaddr(ckpool_t *ckp, const char *addr, bool *script, bool *segwit) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + int ret = false; + connsock_t *cs; + + si = gdata->current_si; + if (unlikely(!si)) { + LOGWARNING("No live current server in generator_checkaddr"); + goto out; + } + cs = &si->cs; + ret = validate_address(cs, addr, script, segwit); +out: + return ret; +} + +bool 
generator_checktxn(const ckpool_t *ckp, const char *txn, json_t **val) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + bool ret = false; + connsock_t *cs; + + si = gdata->current_si; + if (unlikely(!si)) { + LOGWARNING("No live current server in generator_checkaddr"); + goto out; + } + cs = &si->cs; + *val = validate_txn(cs, txn); + if (*val) + ret = true; +out: + return ret; +} + +char *generator_get_txn(ckpool_t *ckp, const char *hash) +{ + gdata_t *gdata = ckp->gdata; + server_instance_t *si; + char *ret = NULL; + connsock_t *cs; + + si = gdata->current_si; + if (unlikely(!si)) { + LOGWARNING("No live current server in generator_get_txn"); + goto out; + } + cs = &si->cs; + ret = get_txn(cs, hash); +out: + return ret; +} + +static bool parse_notify(ckpool_t *ckp, proxy_instance_t *proxi, json_t *val) +{ + const char *prev_hash, *bbversion, *nbit, *ntime; + gdata_t *gdata = proxi->ckp->gdata; + char *coinbase1, *coinbase2; + const char *jobidbuf; + bool clean, ret = false; + notify_instance_t *ni; + json_t *arr, *job_id; + int merkles, i; + + arr = json_array_get(val, 4); + if (!arr || !json_is_array(arr)) + goto out; + + merkles = json_array_size(arr); + job_id = json_copy(json_array_get(val, 0)); + prev_hash = __json_array_string(val, 1); + coinbase1 = json_array_string(val, 2); + coinbase2 = json_array_string(val, 3); + bbversion = __json_array_string(val, 5); + nbit = __json_array_string(val, 6); + ntime = __json_array_string(val, 7); + clean = json_is_true(json_array_get(val, 8)); + if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) { + if (job_id) + json_decref(job_id); + if (coinbase1) + free(coinbase1); + if (coinbase2) + free(coinbase2); + goto out; + } + + LOGDEBUG("Received new notify from proxy %d:%d", proxi->id, proxi->subid); + ni = ckzalloc(sizeof(notify_instance_t)); + ni->jobid = job_id; + jobidbuf = json_string_value(job_id); + LOGDEBUG("JobID %s", jobidbuf); + ni->coinbase1 = coinbase1; + 
LOGDEBUG("Coinbase1 %s", coinbase1); + ni->coinb1len = strlen(coinbase1) / 2; + ni->coinbase2 = coinbase2; + LOGDEBUG("Coinbase2 %s", coinbase2); + memcpy(ni->prevhash, prev_hash, 65); + LOGDEBUG("Prevhash %s", prev_hash); + memcpy(ni->bbversion, bbversion, 9); + LOGDEBUG("BBVersion %s", bbversion); + memcpy(ni->nbit, nbit, 9); + LOGDEBUG("Nbit %s", nbit); + memcpy(ni->ntime, ntime, 9); + LOGDEBUG("Ntime %s", ntime); + ni->clean = clean; + LOGDEBUG("Clean %s", clean ? "true" : "false"); + LOGDEBUG("Merkles %d", merkles); + for (i = 0; i < merkles; i++) { + const char *merkle = __json_array_string(arr, i); + + LOGDEBUG("Merkle %d %s", i, merkle); + memcpy(&ni->merklehash[i][0], merkle, 65); + } + ni->merkles = merkles; + ret = true; + ni->notify_time = time(NULL); + + /* Add the notify instance to the parent proxy list, not the subproxy */ + mutex_lock(&gdata->notify_lock); + ni->id64 = gdata->proxy_notify_id++; + HASH_ADD_I64(gdata->notify_instances, id64, ni); + mutex_unlock(&gdata->notify_lock); + + send_notify(ckp, proxi, ni); +out: + return ret; +} + +static bool parse_diff(proxy_instance_t *proxi, json_t *val) +{ + double diff = json_number_value(json_array_get(val, 0)); + + if (diff == 0 || diff == proxi->diff) + return true; + proxi->diff = diff; + return true; +} + +static bool send_version(proxy_instance_t *proxi, json_t *val) +{ + json_t *json_msg, *id_val = json_object_dup(val, "id"); + bool ret; + + JSON_CPACK(json_msg, "{sossso}", "id", id_val, "result", PACKAGE"/"VERSION, + "error", json_null()); + ret = send_json_msg(&proxi->cs, json_msg); + json_decref(json_msg); + return ret; +} + +static bool show_message(json_t *val) +{ + const char *msg; + + if (!json_is_array(val)) + return false; + msg = json_string_value(json_array_get(val, 0)); + if (!msg) + return false; + LOGNOTICE("Pool message: %s", msg); + return true; +} + +static bool send_pong(proxy_instance_t *proxi, json_t *val) +{ + json_t *json_msg, *id_val = json_object_dup(val, "id"); + bool 
ret; + + JSON_CPACK(json_msg, "{sossso}", "id", id_val, "result", "pong", + "error", json_null()); + ret = send_json_msg(&proxi->cs, json_msg); + json_decref(json_msg); + return ret; +} + +static void prepare_proxy(proxy_instance_t *proxi); + +/* Creates a duplicate instance or proxi to be used as a subproxy, ignoring + * fields we don't use in the subproxy. */ +static proxy_instance_t *create_subproxy(ckpool_t *ckp, gdata_t *gdata, proxy_instance_t *proxi, + const char *url, const char *baseurl) +{ + proxy_instance_t *subproxy; + + mutex_lock(&gdata->lock); + if (gdata->dead_proxies) { + /* Recycle an old proxy instance if one exists */ + subproxy = gdata->dead_proxies; + DL_DELETE(gdata->dead_proxies, subproxy); + } else { + gdata->subproxies_generated++; + subproxy = ckzalloc(sizeof(proxy_instance_t)); + } + mutex_unlock(&gdata->lock); + + subproxy->cs.ckp = subproxy->ckp = ckp; + + mutex_lock(&proxi->proxy_lock); + subproxy->subid = ++proxi->subproxy_count; + mutex_unlock(&proxi->proxy_lock); + + subproxy->id = proxi->id; + subproxy->userid = proxi->userid; + subproxy->global = proxi->global; + subproxy->url = strdup(url); + subproxy->baseurl = strdup(baseurl); + subproxy->auth = strdup(proxi->auth); + subproxy->pass = strdup(proxi->pass); + subproxy->parent = proxi; + subproxy->epfd = proxi->epfd; + cksem_init(&subproxy->cs.sem); + cksem_post(&subproxy->cs.sem); + return subproxy; +} + +static void add_subproxy(proxy_instance_t *proxi, proxy_instance_t *subproxy) +{ + mutex_lock(&proxi->proxy_lock); + HASH_ADD(sh, proxi->subproxies, subid, sizeof(int), subproxy); + mutex_unlock(&proxi->proxy_lock); +} + +static proxy_instance_t *__subproxy_by_id(proxy_instance_t *proxy, const int subid) +{ + proxy_instance_t *subproxy; + + HASH_FIND(sh, proxy->subproxies, &subid, sizeof(int), subproxy); + return subproxy; +} + +/* Add to the dead list to be recycled if possible */ +static void store_proxy(gdata_t *gdata, proxy_instance_t *proxy) +{ + LOGINFO("Recycling data 
from proxy %d:%d", proxy->id, proxy->subid); + + mutex_lock(&gdata->lock); + dealloc(proxy->enonce1); + dealloc(proxy->url); + dealloc(proxy->baseurl); + dealloc(proxy->auth); + dealloc(proxy->pass); + memset(proxy, 0, sizeof(proxy_instance_t)); + DL_APPEND(gdata->dead_proxies, proxy); + mutex_unlock(&gdata->lock); +} + +/* The difference between a dead proxy and a deleted one is the parent proxy entry + * is not removed from the stratifier as it assumes it is down whereas a deleted + * proxy has had its entry removed from the generator. */ +static void send_stratifier_deadproxy(ckpool_t *ckp, const int id, const int subid) +{ + char buf[256]; + + if (ckp->passthrough) + return; + sprintf(buf, "deadproxy=%d:%d", id, subid); + send_proc(ckp->stratifier, buf); +} + +static void send_stratifier_delproxy(ckpool_t *ckp, const int id, const int subid) +{ + char buf[256]; + + if (ckp->passthrough) + return; + sprintf(buf, "delproxy=%d:%d", id, subid); + send_proc(ckp->stratifier, buf); +} + +/* Close the subproxy socket if it's open and remove it from the epoll list */ +static void close_proxy_socket(proxy_instance_t *proxy, proxy_instance_t *subproxy) +{ + if (subproxy->cs.fd > 0) { + epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, subproxy->cs.fd, NULL); + Close(subproxy->cs.fd); + } +} + +/* Remove the subproxy from the proxi list and put it on the dead list. + * Further use of the subproxy pointer may point to a new proxy but will not + * dereference. This will only disable subproxies so parent proxies need to + * have their disabled bool set manually. 
*/ +static void disable_subproxy(gdata_t *gdata, proxy_instance_t *proxi, proxy_instance_t *subproxy) +{ + subproxy->alive = false; + send_stratifier_deadproxy(gdata->ckp, subproxy->id, subproxy->subid); + close_proxy_socket(proxi, subproxy); + if (parent_proxy(subproxy)) + return; + + subproxy->disabled = true; + + mutex_lock(&proxi->proxy_lock); + /* Make sure subproxy is still in the list */ + subproxy = __subproxy_by_id(proxi, subproxy->subid); + if (likely(subproxy)) + HASH_DELETE(sh, proxi->subproxies, subproxy); + mutex_unlock(&proxi->proxy_lock); + + if (subproxy) { + send_stratifier_deadproxy(gdata->ckp, subproxy->id, subproxy->subid); + store_proxy(gdata, subproxy); + } +} + +static bool parse_reconnect(proxy_instance_t *proxy, json_t *val) +{ + bool sameurl = false, ret = false; + ckpool_t *ckp = proxy->ckp; + gdata_t *gdata = ckp->gdata; + proxy_instance_t *parent; + const char *new_url; + int new_port; + char *url; + + new_url = json_string_value(json_array_get(val, 0)); + new_port = json_integer_value(json_array_get(val, 1)); + /* See if we have an invalid entry listing port as a string instead of + * integer and handle that. 
*/ + if (!new_port) { + const char *newport_string = json_string_value(json_array_get(val, 1)); + + if (newport_string) + sscanf(newport_string, "%d", &new_port); + } + if (new_url && strlen(new_url) && new_port) { + char *dot_pool, *dot_reconnect; + int len; + + dot_pool = strchr(proxy->url, '.'); + if (!dot_pool) { + LOGWARNING("Denied stratum reconnect request from server without domain %s", + proxy->url); + goto out; + } + dot_reconnect = strchr(new_url, '.'); + if (!dot_reconnect) { + LOGWARNING("Denied stratum reconnect request to url without domain %s", + new_url); + goto out; + } + len = strlen(dot_reconnect); + if (strncmp(dot_pool, dot_reconnect, len)) { + LOGWARNING("Denied stratum reconnect request from %s to non-matching domain %s", + proxy->url, new_url); + goto out; + } + ASPRINTF(&url, "%s:%d", new_url, new_port); + } else { + url = strdup(proxy->url); + sameurl = true; + } + LOGINFO("Processing reconnect request to %s", url); + + ret = true; + parent = proxy->parent; + disable_subproxy(gdata, parent, proxy); + if (parent != proxy) { + /* If this is a subproxy we only need to create a new one if + * the url has changed. Otherwise automated recruiting will + * take care of creating one if needed. 
*/ + if (!sameurl) + create_subproxy(ckp, gdata, parent, url, parent->baseurl); + goto out; + } + + proxy->reconnect = true; + LOGWARNING("Proxy %d:%s reconnect issue to %s, dropping existing connection", + proxy->id, proxy->url, url); + if (!sameurl) { + char *oldurl = proxy->url; + + proxy->url = url; + free(oldurl); + } else + free(url); +out: + return ret; +} + +static void send_diff(ckpool_t *ckp, proxy_instance_t *proxi) +{ + proxy_instance_t *proxy = proxi->parent; + json_t *json_msg; + char *msg, *buf; + + /* Not set yet */ + if (!proxi->diff) + return; + + JSON_CPACK(json_msg, "{sIsisf}", + "proxy", proxy->id, + "subproxy", proxi->subid, + "diff", proxi->diff); + msg = json_dumps(json_msg, JSON_NO_UTF8); + json_decref(json_msg); + ASPRINTF(&buf, "diff=%s", msg); + free(msg); + send_proc(ckp->stratifier, buf); + free(buf); +} + +static void send_notify(ckpool_t *ckp, proxy_instance_t *proxi, notify_instance_t *ni) +{ + proxy_instance_t *proxy = proxi->parent; + json_t *json_msg, *merkle_arr; + char *msg, *buf; + int i; + + merkle_arr = json_array(); + + for (i = 0; i < ni->merkles; i++) + json_array_append_new(merkle_arr, json_string(&ni->merklehash[i][0])); + /* Use our own jobid instead of the server's one for easy lookup */ + JSON_CPACK(json_msg, "{sIsisisssisssssosssssssb}", + "proxy", proxy->id, "subproxy", proxi->subid, + "jobid", ni->id64, "prevhash", ni->prevhash, "coinb1len", ni->coinb1len, + "coinbase1", ni->coinbase1, "coinbase2", ni->coinbase2, + "merklehash", merkle_arr, "bbversion", ni->bbversion, + "nbit", ni->nbit, "ntime", ni->ntime, + "clean", ni->clean); + + msg = json_dumps(json_msg, JSON_NO_UTF8); + json_decref(json_msg); + ASPRINTF(&buf, "notify=%s", msg); + free(msg); + send_proc(ckp->stratifier, buf); + free(buf); + + /* Send diff now as stratifier will not accept diff till it has a + * valid workbase */ + send_diff(ckp, proxi); +} + +static bool parse_method(ckpool_t *ckp, proxy_instance_t *proxi, const char *msg) +{ + json_t *val = 
NULL, *method, *err_val, *params; + json_error_t err; + bool ret = false; + const char *buf; + + if (!msg) + goto out; + memset(&err, 0, sizeof(err)); + val = json_loads(msg, 0, &err); + if (!val) { + if (proxi->global) { + LOGWARNING("JSON decode of proxy %d:%s msg %s failed(%d): %s", + proxi->id, proxi->url, msg, err.line, err.text); + } else { + LOGNOTICE("JSON decode of proxy %d:%s msg %s failed(%d): %s", + proxi->id, proxi->url, msg, err.line, err.text); + } + goto out; + } + + method = json_object_get(val, "method"); + if (!method) { + /* Likely a share, look for harmless unhandled methods in + * pool response */ + if (strstr(msg, "mining.suggest")) { + LOGINFO("Unhandled suggest_diff from proxy %d:%s", proxi->id, proxi->url); + ret = true; + } else + LOGDEBUG("Failed to find method in json for parse_method"); + goto out; + } + err_val = json_object_get(val, "error"); + params = json_object_get(val, "params"); + + if (err_val && !json_is_null(err_val)) { + char *ss; + + if (err_val) + ss = json_dumps(err_val, 0); + else + ss = strdup("(unknown reason)"); + + LOGINFO("JSON-RPC method decode failed: %s", ss); + free(ss); + goto out; + } + + if (!json_is_string(method)) { + LOGINFO("Method is not string in parse_method"); + goto out; + } + buf = json_string_value(method); + if (!buf || strlen(buf) < 1) { + LOGINFO("Invalid string for method in parse_method"); + goto out; + } + + LOGDEBUG("Proxy %d:%d received method %s", proxi->id, proxi->subid, buf); + if (cmdmatch(buf, "mining.notify")) { + ret = parse_notify(ckp, proxi, params); + goto out; + } + + if (cmdmatch(buf, "mining.set_difficulty")) { + ret = parse_diff(proxi, params); + if (likely(ret)) + send_diff(ckp, proxi); + goto out; + } + + if (cmdmatch(buf, "client.reconnect")) { + ret = parse_reconnect(proxi, params); + goto out; + } + + if (cmdmatch(buf, "client.get_version")) { + ret = send_version(proxi, val); + goto out; + } + + if (cmdmatch(buf, "client.show_message")) { + ret = show_message(params); + 
goto out; + } + + if (cmdmatch(buf, "mining.ping")) { + ret = send_pong(proxi, val); + goto out; + } +out: + if (val) + json_decref(val); + return ret; +} + +/* cs semaphore must be held */ +static bool auth_stratum(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxi) +{ + json_t *val = NULL, *res_val, *req, *err_val; + char *buf = NULL; + bool ret; + + JSON_CPACK(req, "{s:i,s:s,s:[s,s]}", + "id", 42, + "method", "mining.authorize", + "params", proxi->auth, proxi->pass); + ret = send_json_msg(cs, req); + json_decref(req); + if (!ret) { + LOGNOTICE("Proxy %d:%d %s failed to send message in auth_stratum", + proxi->id, proxi->subid, proxi->url); + if (cs->fd > 0) { + epoll_ctl(proxi->epfd, EPOLL_CTL_DEL, cs->fd, NULL); + Close(cs->fd); + } + goto out; + } + + /* Read and parse any extra methods sent. Anything left in the buffer + * should be the response to our auth request. */ + do { + free(buf); + buf = next_proxy_line(cs, proxi); + if (!buf) { + LOGNOTICE("Proxy %d:%d %s failed to receive line in auth_stratum", + proxi->id, proxi->subid, proxi->url); + ret = false; + goto out; + } + ret = parse_method(ckp, proxi, buf); + } while (ret); + + val = json_msg_result(buf, &res_val, &err_val); + if (!val) { + if (proxi->global) { + LOGWARNING("Proxy %d:%d %s failed to get a json result in auth_stratum, got: %s", + proxi->id, proxi->subid, proxi->url, buf); + } else { + LOGNOTICE("Proxy %d:%d %s failed to get a json result in auth_stratum, got: %s", + proxi->id, proxi->subid, proxi->url, buf); + } + goto out; + } + + if (err_val && !json_is_null(err_val)) { + LOGWARNING("Proxy %d:%d %s failed to authorise in auth_stratum due to err_val, got: %s", + proxi->id, proxi->subid, proxi->url, buf); + goto out; + } + if (res_val) { + ret = json_is_true(res_val); + if (!ret) { + if (proxi->global) { + LOGWARNING("Proxy %d:%d %s failed to authorise in auth_stratum, got: %s", + proxi->id, proxi->subid, proxi->url, buf); + } else { + LOGNOTICE("Proxy %d:%d %s failed to authorise in 
auth_stratum, got: %s", + proxi->id, proxi->subid, proxi->url, buf); + } + goto out; + } + } else { + /* No result and no error but successful val means auth success */ + ret = true; + } + LOGINFO("Proxy %d:%d %s auth success in auth_stratum", proxi->id, proxi->subid, proxi->url); +out: + if (val) + json_decref(val); + if (ret) { + /* Now parse any cached responses so there are none in the + * queue and they can be managed one at a time from now on. */ + while(42) { + dealloc(buf); + buf = cached_proxy_line(proxi); + if (!buf) + break; + parse_method(ckp, proxi, buf); + }; + } + return ret; +} + +static proxy_instance_t *proxy_by_id(gdata_t *gdata, const int id) +{ + proxy_instance_t *proxi; + + mutex_lock(&gdata->lock); + HASH_FIND_INT(gdata->proxies, &id, proxi); + mutex_unlock(&gdata->lock); + + return proxi; +} + +static void send_subscribe(ckpool_t *ckp, proxy_instance_t *proxi) +{ + json_t *json_msg; + char *msg, *buf; + + JSON_CPACK(json_msg, "{ss,ss,ss,ss,sI,si,ss,si,sb,si}", + "baseurl", proxi->baseurl, + "url", proxi->url, "auth", proxi->auth, "pass", proxi->pass, + "proxy", proxi->id, "subproxy", proxi->subid, + "enonce1", proxi->enonce1, "nonce2len", proxi->nonce2len, + "global", proxi->global, "userid", proxi->userid); + msg = json_dumps(json_msg, JSON_NO_UTF8); + json_decref(json_msg); + ASPRINTF(&buf, "subscribe=%s", msg); + free(msg); + send_proc(ckp->stratifier, buf); + free(buf); +} + +static proxy_instance_t *subproxy_by_id(proxy_instance_t *proxy, const int subid) +{ + proxy_instance_t *subproxy; + + mutex_lock(&proxy->proxy_lock); + subproxy = __subproxy_by_id(proxy, subid); + mutex_unlock(&proxy->proxy_lock); + + return subproxy; +} + +static void drop_proxy(gdata_t *gdata, const char *buf) +{ + proxy_instance_t *proxy, *subproxy; + int id = -1, subid = -1; + + sscanf(buf, "dropproxy=%d:%d", &id, &subid); + if (unlikely(!subid)) { + LOGWARNING("Generator asked to drop parent proxy %d", id); + return; + } + proxy = proxy_by_id(gdata, id); + if 
(unlikely(!proxy)) { + LOGINFO("Generator asked to drop subproxy from non-existent parent %d", id); + return; + } + subproxy = subproxy_by_id(proxy, subid); + if (!subproxy) { + LOGINFO("Generator asked to drop non-existent subproxy %d:%d", id, subid); + return; + } + LOGNOTICE("Generator asked to drop proxy %d:%d", id, subid); + disable_subproxy(gdata, proxy, subproxy); +} + +static void stratifier_reconnect_client(ckpool_t *ckp, const int64_t id) +{ + char buf[256]; + + sprintf(buf, "reconnclient=%"PRId64, id); + send_proc(ckp->stratifier, buf); +} + +/* Add a share to the gdata share hashlist. Returns the share id */ +static int add_share(gdata_t *gdata, const int64_t client_id, const double diff) +{ + share_msg_t *share = ckzalloc(sizeof(share_msg_t)), *tmpshare; + time_t now; + int ret; + + share->submit_time = now = time(NULL); + share->client_id = client_id; + share->diff = diff; + + /* Add new share entry to the share hashtable. Age old shares */ + mutex_lock(&gdata->share_lock); + ret = share->id64 = gdata->share_id++; + HASH_ADD_I64(gdata->shares, id64, share); + HASH_ITER(hh, gdata->shares, share, tmpshare) { + if (share->submit_time < now - 120) { + HASH_DEL(gdata->shares, share); + free(share); + } + } + mutex_unlock(&gdata->share_lock); + + return ret; +} + +static void submit_share(gdata_t *gdata, json_t *val) +{ + proxy_instance_t *proxy, *proxi; + ckpool_t *ckp = gdata->ckp; + int id, subid, share_id; + bool success = false; + stratum_msg_t *msg; + int64_t client_id; + + /* Get the client id so we can tell the stratifier to drop it if the + * proxy it's bound to is not functional */ + if (unlikely(!json_get_int64(&client_id, val, "client_id"))) { + LOGWARNING("Got no client_id in share"); + goto out; + } + if (unlikely(!json_get_int(&id, val, "proxy"))) { + LOGWARNING("Got no proxy in share"); + goto out; + } + if (unlikely(!json_get_int(&subid, val, "subproxy"))) { + LOGWARNING("Got no subproxy in share"); + goto out; + } + proxy = 
proxy_by_id(gdata, id); + if (unlikely(!proxy)) { + LOGINFO("Client %"PRId64" sending shares to non existent proxy %d, dropping", + client_id, id); + stratifier_reconnect_client(ckp, client_id); + goto out; + } + proxi = subproxy_by_id(proxy, subid); + if (unlikely(!proxi)) { + LOGINFO("Client %"PRId64" sending shares to non existent subproxy %d:%d, dropping", + client_id, id, subid); + stratifier_reconnect_client(ckp, client_id); + goto out; + } + if (!proxi->alive) { + LOGINFO("Client %"PRId64" sending shares to dead subproxy %d:%d, dropping", + client_id, id, subid); + stratifier_reconnect_client(ckp, client_id); + goto out; + } + + success = true; + msg = ckzalloc(sizeof(stratum_msg_t)); + msg->json_msg = val; + share_id = add_share(gdata, client_id, proxi->diff); + json_set_int(val, "id", share_id); + + /* Add the new message to the psend list */ + mutex_lock(&gdata->psend_lock); + gdata->psends_generated++; + DL_APPEND(gdata->psends, msg); + pthread_cond_signal(&gdata->psend_cond); + mutex_unlock(&gdata->psend_lock); + +out: + if (!success) + json_decref(val); +} + +static void clear_notify(notify_instance_t *ni) +{ + if (ni->jobid) + json_decref(ni->jobid); + free(ni->coinbase1); + free(ni->coinbase2); + free(ni); +} + +/* Entered with proxy_lock held */ +static void __decay_proxy(proxy_instance_t *proxy, proxy_instance_t * parent, const double diff) +{ + double tdiff; + tv_t now_t; + + tv_time(&now_t); + tdiff = sane_tdiff(&now_t, &proxy->last_decay); + decay_time(&proxy->dsps1, diff, tdiff, MIN1); + decay_time(&proxy->dsps5, diff, tdiff, MIN5); + decay_time(&proxy->dsps60, diff, tdiff, HOUR); + decay_time(&proxy->dsps1440, diff, tdiff, DAY); + copy_tv(&proxy->last_decay, &now_t); + + tdiff = sane_tdiff(&now_t, &parent->total_last_decay); + decay_time(&parent->tdsps1, diff, tdiff, MIN1); + decay_time(&parent->tdsps5, diff, tdiff, MIN5); + decay_time(&parent->tdsps60, diff, tdiff, HOUR); + decay_time(&parent->tdsps1440, diff, tdiff, DAY); + 
copy_tv(&parent->total_last_decay, &now_t); +} + +static void account_shares(proxy_instance_t *proxy, const double diff, const bool result) +{ + proxy_instance_t *parent = proxy->parent; + + mutex_lock(&parent->proxy_lock); + if (result) { + proxy->diff_accepted += diff; + parent->total_accepted += diff; + __decay_proxy(proxy, parent, diff); + } else { + proxy->diff_rejected += diff; + parent->total_rejected += diff; + __decay_proxy(proxy, parent, 0); + } + mutex_unlock(&parent->proxy_lock); +} + +/* Returns zero if it is not recognised as a share, 1 if it is a valid share + * and -1 if it is recognised as a share but invalid. */ +static int parse_share(gdata_t *gdata, proxy_instance_t *proxi, const char *buf) +{ + json_t *val = NULL, *idval; + bool result = false; + share_msg_t *share; + int ret = 0; + int64_t id; + + val = json_loads(buf, 0, NULL); + if (unlikely(!val)) { + LOGINFO("Failed to parse upstream json msg: %s", buf); + goto out; + } + idval = json_object_get(val, "id"); + if (unlikely(!idval)) { + LOGINFO("Failed to find id in upstream json msg: %s", buf); + goto out; + } + id = json_integer_value(idval); + if (unlikely(!json_get_bool(&result, val, "result"))) { + LOGINFO("Failed to find result in upstream json msg: %s", buf); + goto out; + } + + mutex_lock(&gdata->share_lock); + HASH_FIND_I64(gdata->shares, &id, share); + if (share) { + HASH_DEL(gdata->shares, share); + free(share); + } + mutex_unlock(&gdata->share_lock); + + if (!share) { + LOGINFO("Proxy %d:%d failed to find matching share to result: %s", + proxi->id, proxi->subid, buf); + /* We don't know what diff these shares are so assume the + * current proxy diff. 
*/ + account_shares(proxi, proxi->diff, result); + ret = -1; + goto out; + } + ret = 1; + account_shares(proxi, share->diff, result); + LOGINFO("Proxy %d:%d share result %s from client %"PRId64, proxi->id, proxi->subid, + buf, share->client_id); + free(share); +out: + if (val) + json_decref(val); + return ret; +} + +struct cs_msg { + cs_msg_t *next; + cs_msg_t *prev; + proxy_instance_t *proxy; + char *buf; + int len; + int ofs; +}; + +/* Sends all messages in the queue ready to be dispatched, leaving those that + * would block to be handled next pass */ +static void send_json_msgq(gdata_t *gdata, cs_msg_t **csmsgq) +{ + cs_msg_t *csmsg, *tmp; + int ret; + + DL_FOREACH_SAFE(*csmsgq, csmsg, tmp) { + proxy_instance_t *proxy = csmsg->proxy; + + /* Only try to send one message at a time to each proxy + * to avoid sending parts of different messages */ + if (proxy->sending && proxy->sending != csmsg) + continue; + while (csmsg->len > 0) { + int fd; + + if (unlikely(!proxy->alive)) { + LOGDEBUG("Dropping send message to dead proxy %d:%d in send_json_msgq", + proxy->id, proxy->subid); + csmsg->len = 0; + break; + } + proxy->sending = csmsg; + fd = proxy->cs.fd; + ret = send(fd, csmsg->buf + csmsg->ofs, csmsg->len, MSG_DONTWAIT); + if (ret < 1) { + if (!ret) + break; + ret = 0; + if (errno == EAGAIN || errno == EWOULDBLOCK) + break; + csmsg->len = 0; + LOGNOTICE("Proxy %d:%d %s failed to send msg in send_json_msgq, dropping", + proxy->id, proxy->subid, proxy->url); + disable_subproxy(gdata, proxy->parent, proxy); + } + csmsg->ofs += ret; + csmsg->len -= ret; + } + if (csmsg->len < 1) { + proxy->sending = NULL; + DL_DELETE(*csmsgq, csmsg); + free(csmsg->buf); + free(csmsg); + } + } +} + +static void add_json_msgq(cs_msg_t **csmsgq, proxy_instance_t *proxy, json_t **val) +{ + cs_msg_t *csmsg = ckzalloc(sizeof(cs_msg_t)); + + csmsg->buf = json_dumps(*val, JSON_ESCAPE_SLASH | JSON_EOL); + json_decref(*val); + *val = NULL; + if (unlikely(!csmsg->buf)) { + LOGWARNING("Failed to 
create json dump in add_json_msgq"); + return; + } + csmsg->len = strlen(csmsg->buf); + csmsg->proxy = proxy; + DL_APPEND(*csmsgq, csmsg); +} + +/* For processing and sending shares. proxy refers to parent proxy here */ +static void *proxy_send(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + gdata_t *gdata = ckp->gdata; + stratum_msg_t *msg = NULL; + cs_msg_t *csmsgq = NULL; + + rename_proc("proxysend"); + + pthread_detach(pthread_self()); + + while (42) { + proxy_instance_t *proxy, *subproxy; + int proxyid = 0, subid = 0; + int64_t client_id = 0, id; + notify_instance_t *ni; + json_t *jobid = NULL; + json_t *val; + + if (unlikely(msg)) { + json_decref(msg->json_msg); + free(msg); + } + + mutex_lock(&gdata->psend_lock); + if (!gdata->psends) { + /* Poll every 10ms */ + const ts_t polltime = {0, 10000000}; + ts_t timeout_ts; + + ts_realtime(&timeout_ts); + timeraddspec(&timeout_ts, &polltime); + cond_timedwait(&gdata->psend_cond, &gdata->psend_lock, &timeout_ts); + } + msg = gdata->psends; + if (likely(msg)) + DL_DELETE(gdata->psends, msg); + mutex_unlock(&gdata->psend_lock); + + if (!msg) { + send_json_msgq(gdata, &csmsgq); + continue; + } + + if (unlikely(!json_get_int(&subid, msg->json_msg, "subproxy"))) { + LOGWARNING("Failed to find subproxy in proxy_send msg"); + continue; + } + if (unlikely(!json_get_int64(&id, msg->json_msg, "jobid"))) { + LOGWARNING("Failed to find jobid in proxy_send msg"); + continue; + } + if (unlikely(!json_get_int(&proxyid, msg->json_msg, "proxy"))) { + LOGWARNING("Failed to find proxy in proxy_send msg"); + continue; + } + if (unlikely(!json_get_int64(&client_id, msg->json_msg, "client_id"))) { + LOGWARNING("Failed to find client_id in proxy_send msg"); + continue; + } + proxy = proxy_by_id(gdata, proxyid); + if (unlikely(!proxy)) { + LOGWARNING("Proxysend for got message for non-existent proxy %d", + proxyid); + continue; + } + subproxy = subproxy_by_id(proxy, subid); + if (unlikely(!subproxy)) { + LOGWARNING("Proxysend for got 
message for non-existent subproxy %d:%d", + proxyid, subid); + continue; + } + + mutex_lock(&gdata->notify_lock); + HASH_FIND_INT(gdata->notify_instances, &id, ni); + if (ni) + jobid = json_copy(ni->jobid); + mutex_unlock(&gdata->notify_lock); + + if (unlikely(!jobid)) { + stratifier_reconnect_client(ckp, client_id); + LOGNOTICE("Proxy %d:%s failed to find matching jobid in proxysend", + subproxy->id, subproxy->url); + continue; + } + + JSON_CPACK(val, "{s[soooo]soss}", "params", subproxy->auth, jobid, + json_object_dup(msg->json_msg, "nonce2"), + json_object_dup(msg->json_msg, "ntime"), + json_object_dup(msg->json_msg, "nonce"), + "id", json_object_dup(msg->json_msg, "id"), + "method", "mining.submit"); + add_json_msgq(&csmsgq, subproxy, &val); + send_json_msgq(gdata, &csmsgq); + } + return NULL; +} + +static void passthrough_send(ckpool_t *ckp, pass_msg_t *pm) +{ + proxy_instance_t *proxy = pm->proxy; + connsock_t *cs = pm->cs; + int len, sent; + + if (unlikely(!proxy->alive || cs->fd < 0)) { + LOGDEBUG("Dropping send to dead proxy of upstream json msg: %s", pm->msg); + goto out; + } + LOGDEBUG("Sending upstream json msg: %s", pm->msg); + len = strlen(pm->msg); + sent = write_socket(cs->fd, pm->msg, len); + if (unlikely(sent != len)) { + LOGWARNING("Failed to passthrough %d bytes of message %s, attempting reconnect", + len, pm->msg); + Close(cs->fd); + proxy->alive = false; + reconnect_generator(ckp); + } +out: + free(pm->msg); + free(pm); +} + +static void passthrough_add_send(proxy_instance_t *proxy, char *msg) +{ + pass_msg_t *pm = ckzalloc(sizeof(pass_msg_t)); + + pm->proxy = proxy; + pm->cs = &proxy->cs; + pm->msg = msg; + ckmsgq_add(proxy->passsends, pm); +} + +void generator_add_send(ckpool_t *ckp, json_t *val) +{ + gdata_t *gdata = ckp->gdata; + char *buf; + + if (!ckp->passthrough) { + submit_share(gdata, val); + return; + } + if (unlikely(!gdata->current_proxy)) { + LOGWARNING("No current proxy to send passthrough data to"); + goto out; + } + buf = 
json_dumps(val, JSON_COMPACT | JSON_EOL); + if (unlikely(!buf)) { + LOGWARNING("Unable to decode json in generator_add_send"); + goto out; + } + passthrough_add_send(gdata->current_proxy, buf); +out: + json_decref(val); +} + +static void suggest_diff(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxy) +{ + json_t *req; + bool ret; + + JSON_CPACK(req, "{s:i,s:s, s:[I]}", + "id", 41, + "method", "mining.suggest", + "params", ckp->mindiff); + ret = send_json_msg(cs, req); + json_decref(req); + if (!ret) { + LOGNOTICE("Proxy %d:%d %s failed to send message in suggest_diff", + proxy->id, proxy->subid, proxy->url); + if (cs->fd > 0) { + epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, cs->fd, NULL); + Close(cs->fd); + } + } + /* We don't care about the response here. It can get filtered out later + * if it fails upstream. */ +} + +/* Upon failing connnect, subscribe, or auth, back off on the next attempt. + * This function should be called on the parent proxy */ +static void proxy_backoff(proxy_instance_t *proxy) +{ + /* Add 5 seconds with each backoff, up to maximum of 1 minute */ + if (proxy->backoff < 60) + proxy->backoff += 5; +} + +static bool proxy_alive(ckpool_t *ckp, proxy_instance_t *proxi, connsock_t *cs, + bool pinging) +{ + proxy_instance_t *parent = proxi->parent; + bool ret = false; + + /* Has this proxy already been reconnected? 
*/ + if (proxi->alive) + return true; + if (proxi->disabled) + return false; + + /* Serialise all send/recvs here with the cs semaphore */ + cksem_wait(&cs->sem); + /* Check again after grabbing semaphore */ + if (unlikely(proxi->alive)) { + ret = true; + goto out; + } + if (!extract_sockaddr(proxi->url, &cs->url, &cs->port)) { + LOGWARNING("Failed to extract address from %s", proxi->url); + goto out; + } + if (!connect_proxy(ckp, cs, proxi)) { + if (!pinging) { + LOGINFO("Failed to connect to %s:%s in proxy_mode!", + cs->url, cs->port); + } + parent->connect_status = STATUS_FAIL; + proxy_backoff(parent); + goto out; + } + parent->connect_status = STATUS_SUCCESS; + + if (ckp->node) { + if (!node_stratum(cs, proxi)) { + LOGWARNING("Failed initial node setup to %s:%s !", + cs->url, cs->port); + goto out; + } + ret = true; + goto out; + } + if (ckp->passthrough) { + if (!passthrough_stratum(cs, proxi)) { + LOGWARNING("Failed initial passthrough to %s:%s !", + cs->url, cs->port); + goto out; + } + ret = true; + goto out; + } + /* Test we can connect, authorise and get stratum information */ + if (!subscribe_stratum(ckp, cs, proxi)) { + if (!pinging) { + LOGWARNING("Failed initial subscribe to %s:%s !", + cs->url, cs->port); + } + parent->subscribe_status = STATUS_FAIL; + proxy_backoff(parent); + goto out; + } + parent->subscribe_status = STATUS_SUCCESS; + + if (!ckp->passthrough) + send_subscribe(ckp, proxi); + if (!auth_stratum(ckp, cs, proxi)) { + if (!pinging) { + LOGWARNING("Failed initial authorise to %s:%s with %s:%s !", + cs->url, cs->port, proxi->auth, proxi->pass); + } + parent->auth_status = STATUS_FAIL; + proxy_backoff(parent); + goto out; + } + parent->auth_status = STATUS_SUCCESS; + proxi->authorised = ret = true; + parent->backoff = 0; + if (ckp->mindiff > 1) + suggest_diff(ckp, cs, proxi); +out: + if (!ret) { + send_stratifier_deadproxy(ckp, proxi->id, proxi->subid); + /* Close and invalidate the file handle */ + Close(cs->fd); + } + proxi->alive = ret; 
+ cksem_post(&cs->sem); + + /* Decrease the parent's recruit count after sending the stratifier the + * new subscribe so it can get an accurate headroom count before + * requesting more proxies. */ + if (ret) { + proxy_instance_t *parent = proxi->parent; + + if (parent) { + mutex_lock(&parent->proxy_lock); + parent->recruit -= proxi->clients_per_proxy; + if (parent->recruit < 0) + parent->recruit = 0; + mutex_unlock(&parent->proxy_lock); + } + } + + return ret; +} + +static void *proxy_recruit(void *arg) +{ + proxy_instance_t *proxy, *parent = (proxy_instance_t *)arg; + ckpool_t *ckp = parent->ckp; + gdata_t *gdata = ckp->gdata; + bool recruit, alive; + + pthread_detach(pthread_self()); + + /* We do this in a separate thread so it's okay to sleep here */ + if (parent->backoff) + sleep(parent->backoff); + +retry: + recruit = false; + proxy = create_subproxy(ckp, gdata, parent, parent->url, parent->baseurl); + alive = proxy_alive(ckp, proxy, &proxy->cs, false); + if (!alive) { + LOGNOTICE("Subproxy failed proxy_alive testing"); + store_proxy(gdata, proxy); + } else + add_subproxy(parent, proxy); + + mutex_lock(&parent->proxy_lock); + if (alive && parent->recruit > 0) + recruit = true; + else /* Reset so the next request will try again */ + parent->recruit = 0; + mutex_unlock(&parent->proxy_lock); + + if (recruit) + goto retry; + + return NULL; +} + +static void recruit_subproxies(proxy_instance_t *proxi, const int recruits) +{ + bool recruit = false; + pthread_t pth; + + mutex_lock(&proxi->proxy_lock); + if (!proxi->recruit) + recruit = true; + if (proxi->recruit < recruits) + proxi->recruit = recruits; + mutex_unlock(&proxi->proxy_lock); + + if (recruit) + create_pthread(&pth, proxy_recruit, proxi); +} + +/* Queue up to the requested amount */ +static void recruit_subproxy(gdata_t *gdata, const char *buf) +{ + int recruits = 1, id = 0; + proxy_instance_t *proxy; + + sscanf(buf, "recruit=%d:%d", &id, &recruits); + proxy = proxy_by_id(gdata, id); + if 
(unlikely(!proxy)) { + LOGNOTICE("Generator failed to find proxy id %d to recruit subproxies", + id); + return; + } + recruit_subproxies(proxy, recruits); +} + +static void *proxy_reconnect(void *arg) +{ + proxy_instance_t *proxy = (proxy_instance_t *)arg; + connsock_t *cs = &proxy->cs; + ckpool_t *ckp = proxy->ckp; + + pthread_detach(pthread_self()); + if (proxy->parent->backoff) + sleep(proxy->parent->backoff); + proxy_alive(ckp, proxy, cs, true); + proxy->reconnecting = false; + return NULL; +} + +/* For reconnecting the parent proxy instance async */ +static void reconnect_proxy(proxy_instance_t *proxi) +{ + pthread_t pth; + + if (proxi->reconnecting) + return; + proxi->reconnecting = true; + create_pthread(&pth, proxy_reconnect, proxi); +} + +/* For receiving messages from an upstream pool to pass downstream. Responsible + * for setting up the connection and testing pool is live. */ +static void *passthrough_recv(void *arg) +{ + proxy_instance_t *proxi = (proxy_instance_t *)arg; + connsock_t *cs = &proxi->cs; + ckpool_t *ckp = proxi->ckp; + bool alive; + + rename_proc("passrecv"); + + proxi->parent = proxi; + if (proxy_alive(ckp, proxi, cs, false)) + LOGWARNING("Passthrough proxy %d:%s connection established", proxi->id, proxi->url); + alive = proxi->alive; + + while (42) { + float timeout = 5; + int ret; + + while (!proxy_alive(ckp, proxi, cs, true)) { + alive = false; + sleep(5); + } + if (!alive) { + reconnect_generator(ckp); + LOGWARNING("Passthrough %d:%s recovered", proxi->id, proxi->url); + alive = true; + } + + cksem_wait(&cs->sem); + ret = read_socket_line(cs, &timeout); + /* Simply forward the message on, as is, to the connector to + * process. 
Possibly parse parameters sent by upstream pool + * here */ + if (likely(ret > 0)) { + LOGDEBUG("Passthrough recv received upstream msg: %s", cs->buf); + send_proc(ckp->connector, cs->buf); + } else if (ret < 0) { + /* Read failure */ + LOGWARNING("Passthrough %d:%s failed to read_socket_line in passthrough_recv, attempting reconnect", + proxi->id, proxi->url); + alive = proxi->alive = false; + Close(cs->fd); + reconnect_generator(ckp); + } else /* No messages during timeout */ + LOGDEBUG("Passthrough %d:%s no messages received", proxi->id, proxi->url); + cksem_post(&cs->sem); + } + return NULL; +} + +static bool subproxies_alive(proxy_instance_t *proxy) +{ + proxy_instance_t *subproxy, *tmp; + bool ret = false; + + mutex_lock(&proxy->proxy_lock); + HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { + if (subproxy->alive) { + ret = true; + break; + } + } + mutex_unlock(&proxy->proxy_lock); + + return ret; +} + +/* For receiving messages from the upstream proxy, also responsible for setting + * up the connection and testing it's alive. 
*/ +static void *proxy_recv(void *arg) +{ + proxy_instance_t *proxi = (proxy_instance_t *)arg; + connsock_t *cs = &proxi->cs; + proxy_instance_t *subproxy; + ckpool_t *ckp = proxi->ckp; + gdata_t *gdata = ckp->gdata; + struct epoll_event event; + bool alive; + int epfd; + + rename_proc("proxyrecv"); + pthread_detach(pthread_self()); + + proxi->epfd = epfd = epoll_create1(EPOLL_CLOEXEC); + if (epfd < 0){ + LOGEMERG("FATAL: Failed to create epoll in proxyrecv"); + return NULL; + } + + if (proxy_alive(ckp, proxi, cs, false)) + LOGWARNING("Proxy %d:%s connection established", proxi->id, proxi->url); + + alive = proxi->alive; + + while (42) { + bool message = false, hup = false; + share_msg_t *share, *tmpshare; + notify_instance_t *ni, *tmp; + float timeout; + time_t now; + int ret; + + subproxy = proxi; + if (!proxi->alive) { + reconnect_proxy(proxi); + while (!subproxies_alive(proxi)) { + reconnect_proxy(proxi); + if (alive) { + reconnect_generator(ckp); + LOGWARNING("Proxy %d:%s failed, attempting reconnect", + proxi->id, proxi->url); + alive = false; + } + sleep(5); + } + } + if (!alive) { + reconnect_generator(ckp); + LOGWARNING("Proxy %d:%s recovered", proxi->id, proxi->url); + alive = true; + } + + now = time(NULL); + + /* Age old notifications older than 10 mins old */ + mutex_lock(&gdata->notify_lock); + HASH_ITER(hh, gdata->notify_instances, ni, tmp) { + if (HASH_COUNT(gdata->notify_instances) < 3) + break; + if (ni->notify_time < now - 600) { + HASH_DEL(gdata->notify_instances, ni); + clear_notify(ni); + } + } + mutex_unlock(&gdata->notify_lock); + + /* Similary with shares older than 2 mins without response */ + mutex_lock(&gdata->share_lock); + HASH_ITER(hh, gdata->shares, share, tmpshare) { + if (share->submit_time < now - 120) { + HASH_DEL(gdata->shares, share); + free(share); + } + } + mutex_unlock(&gdata->share_lock); + + cs = NULL; + /* If we don't get an update within 10 minutes the upstream pool + * has likely stopped responding. 
*/ + ret = epoll_wait(epfd, &event, 1, 600000); + if (likely(ret > 0)) { + subproxy = event.data.ptr; + cs = &subproxy->cs; + if (!subproxy->alive) { + cs = NULL; + continue; + } + + /* Serialise messages from here once we have a cs by + * holding the semaphore. */ + cksem_wait(&cs->sem); + /* Process any messages before checking for errors in + * case a message is sent and then the socket + * immediately closed. + */ + if (event.events & EPOLLIN) { + timeout = 30; + ret = read_socket_line(cs, &timeout); + /* If we are unable to read anything within 30 + * seconds at this point after EPOLLIN is set + * then the socket is dead. */ + if (ret < 1) { + LOGNOTICE("Proxy %d:%d %s failed to read_socket_line in proxy_recv", + proxi->id, subproxy->subid, subproxy->url); + hup = true; + } else { + message = true; + timeout = 0; + } + } + if (event.events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP)) { + LOGNOTICE("Proxy %d:%d %s epoll hangup in proxy_recv", + proxi->id, subproxy->subid, subproxy->url); + hup = true; + } + } else { + LOGNOTICE("Proxy %d:%d %s failed to epoll in proxy_recv", + proxi->id, subproxy->subid, subproxy->url); + hup = true; + } + + /* Parse any other messages already fully buffered with a zero + * timeout. 
*/ + while (message || read_socket_line(cs, &timeout) > 0) { + message = false; + timeout = 0; + /* subproxy may have been recycled here if it is not a + * parent and reconnect was issued */ + if (parse_method(ckp, subproxy, cs->buf)) + continue; + /* If it's not a method it should be a share result */ + if (!parse_share(gdata, subproxy, cs->buf)) { + LOGNOTICE("Proxy %d:%d unhandled stratum message: %s", + subproxy->id, subproxy->subid, cs->buf); + } + } + + /* Process hangup only after parsing messages */ + if (hup) + disable_subproxy(gdata, proxi, subproxy); + if (cs) + cksem_post(&cs->sem); + } + + return NULL; +} + +/* Thread that handles all received messages from user proxies */ +static void *userproxy_recv(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + gdata_t *gdata = ckp->gdata; + struct epoll_event event; + int epfd; + + rename_proc("uproxyrecv"); + pthread_detach(pthread_self()); + + epfd = epoll_create1(EPOLL_CLOEXEC); + if (epfd < 0){ + LOGEMERG("FATAL: Failed to create epoll in userproxy_recv"); + return NULL; + } + + while (42) { + proxy_instance_t *proxy, *tmpproxy; + bool message = false, hup = false; + share_msg_t *share, *tmpshare; + notify_instance_t *ni, *tmp; + connsock_t *cs; + float timeout; + time_t now; + int ret; + + mutex_lock(&gdata->lock); + HASH_ITER(hh, gdata->proxies, proxy, tmpproxy) { + if (!proxy->global && !proxy->alive) { + proxy->epfd = epfd; + reconnect_proxy(proxy); + } + } + mutex_unlock(&gdata->lock); + + ret = epoll_wait(epfd, &event, 1, 1000); + if (ret < 1) { + if (likely(!ret)) + continue; + LOGEMERG("Failed to epoll_wait in userproxy_recv"); + break; + } + proxy = event.data.ptr; + /* Make sure we haven't popped this off before we've finished + * subscribe/auth */ + if (unlikely(!proxy->authorised)) + continue; + + now = time(NULL); + + mutex_lock(&gdata->notify_lock); + HASH_ITER(hh, gdata->notify_instances, ni, tmp) { + if (HASH_COUNT(gdata->notify_instances) < 3) + break; + if (ni->notify_time < now - 600) { + 
HASH_DEL(gdata->notify_instances, ni); + clear_notify(ni); + } + } + mutex_unlock(&gdata->notify_lock); + + /* Similary with shares older than 2 mins without response */ + mutex_lock(&gdata->share_lock); + HASH_ITER(hh, gdata->shares, share, tmpshare) { + if (share->submit_time < now - 120) { + HASH_DEL(gdata->shares, share); + free(share); + } + } + mutex_unlock(&gdata->share_lock); + + cs = &proxy->cs; + +#if 0 + /* Is this needed at all? */ + if (!proxy->alive) + continue; +#endif + + if ((event.events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP))) { + LOGNOTICE("Proxy %d:%d %s hangup in userproxy_recv", proxy->id, + proxy->subid, proxy->url); + hup = true; + } + + if (likely(event.events & EPOLLIN)) { + timeout = 30; + + cksem_wait(&cs->sem); + ret = read_socket_line(cs, &timeout); + /* If we are unable to read anything within 30 + * seconds at this point after EPOLLIN is set + * then the socket is dead. */ + if (ret < 1) { + LOGNOTICE("Proxy %d:%d %s failed to read_socket_line in userproxy_recv", + proxy->id, proxy->subid, proxy->url); + hup = true; + } else { + message = true; + timeout = 0; + } + while (message || (ret = read_socket_line(cs, &timeout)) > 0) { + message = false; + timeout = 0; + /* proxy may have been recycled here if it is not a + * parent and reconnect was issued */ + if (parse_method(ckp, proxy, cs->buf)) + continue; + /* If it's not a method it should be a share result */ + if (!parse_share(gdata, proxy, cs->buf)) { + LOGNOTICE("Proxy %d:%d unhandled stratum message: %s", + proxy->id, proxy->subid, cs->buf); + } + } + cksem_post(&cs->sem); + } + + if (hup) { + disable_subproxy(gdata, proxy->parent, proxy); + continue; + } + } + return NULL; +} + +static void prepare_proxy(proxy_instance_t *proxi) +{ + proxi->parent = proxi; + mutex_init(&proxi->proxy_lock); + add_subproxy(proxi, proxi); + if (proxi->global) + create_pthread(&proxi->pth_precv, proxy_recv, proxi); +} + +static proxy_instance_t *wait_best_proxy(ckpool_t *ckp, gdata_t *gdata) +{ + 
proxy_instance_t *ret = NULL, *proxi, *tmp; + int retries = 0; + + while (42) { + mutex_lock(&gdata->lock); + HASH_ITER(hh, gdata->proxies, proxi, tmp) { + if (proxi->disabled || !proxi->global) + continue; + if (proxi->alive || subproxies_alive(proxi)) { + if (!ret || proxi->id < ret->id) + ret = proxi; + } + } + mutex_unlock(&gdata->lock); + + if (ret) + break; + /* Send reject message if we are unable to find an active + * proxy for more than 5 seconds */ + if (!((retries++) % 5)) + send_proc(ckp->connector, "reject"); + sleep(1); + } + send_proc(ckp->connector, ret ? "accept" : "reject"); + return ret; +} + +static void send_list(gdata_t *gdata, const int sockd) +{ + proxy_instance_t *proxy, *tmp; + json_t *val, *array_val; + + array_val = json_array(); + + mutex_lock(&gdata->lock); + HASH_ITER(hh, gdata->proxies, proxy, tmp) { + JSON_CPACK(val, "{si,sb,si,ss,ss,sf,sb,sb,si}", + "id", proxy->id, "global", proxy->global, "userid", proxy->userid, + "auth", proxy->auth, "pass", proxy->pass, + "diff", proxy->diff, + "disabled", proxy->disabled, "alive", proxy->alive, + "subproxies", proxy->subproxy_count); + if (proxy->enonce1) { + json_set_string(val, "enonce1", proxy->enonce1); + json_set_int(val, "nonce1len", proxy->nonce1len); + json_set_int(val, "nonce2len", proxy->nonce2len); + } + json_array_append_new(array_val, val); + } + mutex_unlock(&gdata->lock); + + JSON_CPACK(val, "{so}", "proxies", array_val); + send_api_response(val, sockd); +} + +static void send_sublist(gdata_t *gdata, const int sockd, const char *buf) +{ + proxy_instance_t *proxy, *subproxy, *tmp; + json_t *val = NULL, *res = NULL, *array_val; + json_error_t err_val; + int64_t id; + + array_val = json_array(); + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (unlikely(!json_get_int64(&id, val, "id"))) { + res = json_errormsg("Failed to get ID in send_sublist JSON: %s", buf); + goto out; + } + proxy = proxy_by_id(gdata, 
id); + if (unlikely(!proxy)) { + res = json_errormsg("Failed to find proxy %"PRId64" in send_sublist", id); + goto out; + } + + mutex_lock(&gdata->lock); + HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { + JSON_CPACK(res, "{si,ss,ss,sf,sb,sb}", + "subid", subproxy->id, + "auth", subproxy->auth, "pass", subproxy->pass, + "diff", subproxy->diff, + "disabled", subproxy->disabled, "alive", subproxy->alive); + if (subproxy->enonce1) { + json_set_string(res, "enonce1", subproxy->enonce1); + json_set_int(res, "nonce1len", subproxy->nonce1len); + json_set_int(res, "nonce2len", subproxy->nonce2len); + } + json_array_append_new(array_val, res); + } + mutex_unlock(&gdata->lock); + + JSON_CPACK(res, "{so}", "subproxies", array_val); +out: + if (val) + json_decref(val); + send_api_response(res, sockd); +} + +static proxy_instance_t *__add_proxy(ckpool_t *ckp, gdata_t *gdata, const int num); + +static proxy_instance_t *__add_userproxy(ckpool_t *ckp, gdata_t *gdata, const int id, + const int userid, char *url, char *auth, char *pass) +{ + proxy_instance_t *proxy; + + gdata->proxies_generated++; + proxy = ckzalloc(sizeof(proxy_instance_t)); + proxy->id = id; + proxy->userid = userid; + proxy->url = url; + proxy->baseurl = strdup(url); + proxy->auth = auth; + proxy->pass = pass; + proxy->ckp = proxy->cs.ckp = ckp; + cksem_init(&proxy->cs.sem); + cksem_post(&proxy->cs.sem); + HASH_ADD_INT(gdata->proxies, id, proxy); + return proxy; +} + +static void add_userproxy(ckpool_t *ckp, gdata_t *gdata, const int userid, + const char *url, const char *auth, const char *pass) +{ + proxy_instance_t *proxy; + char *newurl = strdup(url); + char *newauth = strdup(auth); + char *newpass = strdup(pass ? 
pass : ""); + int id; + + mutex_lock(&gdata->lock); + id = ckp->proxies++; + proxy = __add_userproxy(ckp, gdata, id, userid, newurl, newauth, newpass); + mutex_unlock(&gdata->lock); + + LOGWARNING("Adding non global user %s, %d proxy %d:%s", auth, userid, id, url); + prepare_proxy(proxy); +} + +static void parse_addproxy(ckpool_t *ckp, gdata_t *gdata, const int sockd, const char *buf) +{ + char *url = NULL, *auth = NULL, *pass = NULL; + json_t *val = NULL, *res = NULL; + proxy_instance_t *proxy; + json_error_t err_val; + int id, userid; + bool global; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + json_get_string(&url, val, "url"); + json_get_string(&auth, val, "auth"); + json_get_string(&pass, val, "pass"); + if (json_get_int(&userid, val, "userid")) + global = false; + else + global = true; + if (unlikely(!url || !auth || !pass)) { + res = json_errormsg("Failed to decode url/auth/pass in addproxy %s", buf); + goto out; + } + + mutex_lock(&gdata->lock); + id = ckp->proxies++; + if (global) { + ckp->proxyurl = realloc(ckp->proxyurl, sizeof(char **) * ckp->proxies); + ckp->proxyauth = realloc(ckp->proxyauth, sizeof(char **) * ckp->proxies); + ckp->proxypass = realloc(ckp->proxypass, sizeof(char **) * ckp->proxies); + ckp->proxyurl[id] = url; + ckp->proxyauth[id] = auth; + ckp->proxypass[id] = pass; + proxy = __add_proxy(ckp, gdata, id); + } else + proxy = __add_userproxy(ckp, gdata, id, userid, url, auth, pass); + mutex_unlock(&gdata->lock); + + if (global) + LOGNOTICE("Adding global proxy %d:%s", id, proxy->url); + else + LOGNOTICE("Adding user %d proxy %d:%s", userid, id, proxy->url); + prepare_proxy(proxy); + if (global) { + JSON_CPACK(res, "{si,ss,ss,ss}", + "id", proxy->id, "url", url, "auth", auth, "pass", pass); + } else { + JSON_CPACK(res, "{si,ss,ss,ss,si}", + "id", proxy->id, "url", url, "auth", auth, "pass", pass, + "userid", proxy->userid); + } +out: + if (val) + json_decref(val); 
+ send_api_response(res, sockd); +} + +static void delete_proxy(ckpool_t *ckp, gdata_t *gdata, proxy_instance_t *proxy) +{ + proxy_instance_t *subproxy; + + /* Remove the proxy from the master list first */ + mutex_lock(&gdata->lock); + HASH_DEL(gdata->proxies, proxy); + /* Disable all its threads */ + pthread_cancel(proxy->pth_precv); + close_proxy_socket(proxy, proxy); + mutex_unlock(&gdata->lock); + + /* Recycle all its subproxies */ + do { + mutex_lock(&proxy->proxy_lock); + subproxy = proxy->subproxies; + if (subproxy) + HASH_DELETE(sh, proxy->subproxies, subproxy); + mutex_unlock(&proxy->proxy_lock); + + if (subproxy) { + close_proxy_socket(proxy, subproxy); + send_stratifier_delproxy(ckp, subproxy->id, subproxy->subid); + if (proxy != subproxy) + store_proxy(gdata, subproxy); + } + } while (subproxy); + + /* Recycle the proxy itself */ + store_proxy(gdata, proxy); +} + +static void parse_delproxy(ckpool_t *ckp, gdata_t *gdata, const int sockd, const char *buf) +{ + json_t *val = NULL, *res = NULL; + proxy_instance_t *proxy; + json_error_t err_val; + int id = -1; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + json_get_int(&id, val, "id"); + proxy = proxy_by_id(gdata, id); + if (!proxy) { + res = json_errormsg("Proxy id %d not found", id); + goto out; + } + JSON_CPACK(res, "{si,ss,ss,ss,ss}", "id", proxy->id, "url", proxy->url, + "baseurl", proxy->baseurl,"auth", proxy->auth, "pass", proxy->pass); + + LOGNOTICE("Deleting proxy %d:%s", proxy->id, proxy->url); + delete_proxy(ckp, gdata, proxy); +out: + if (val) + json_decref(val); + send_api_response(res, sockd); +} + +static void parse_ableproxy(gdata_t *gdata, const int sockd, const char *buf, bool disable) +{ + json_t *val = NULL, *res = NULL; + proxy_instance_t *proxy; + json_error_t err_val; + int id = -1; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + 
json_get_int(&id, val, "id"); + proxy = proxy_by_id(gdata, id); + if (!proxy) { + res = json_errormsg("Proxy id %d not found", id); + goto out; + } + JSON_CPACK(res, "{si,ss, ss,ss,ss}", "id", proxy->id, "url", proxy->url, + "baseurl", proxy->baseurl,"auth", proxy->auth, "pass", proxy->pass); + if (proxy->disabled != disable) { + proxy->disabled = disable; + LOGNOTICE("%sabling proxy %d:%s", disable ? "Dis" : "En", id, proxy->url); + } + if (disable) { + /* Set disabled bool here in case this is a parent proxy */ + proxy->disabled = true; + disable_subproxy(gdata, proxy, proxy); + } else + reconnect_proxy(proxy); +out: + if (val) + json_decref(val); + send_api_response(res, sockd); +} + +static void send_stats(gdata_t *gdata, const int sockd) +{ + json_t *val = json_object(), *subval; + int total_objects, objects; + int64_t generated, memsize; + proxy_instance_t *proxy; + stratum_msg_t *msg; + + mutex_lock(&gdata->lock); + objects = HASH_COUNT(gdata->proxies); + memsize = SAFE_HASH_OVERHEAD(gdata->proxies) + sizeof(proxy_instance_t) * objects; + generated = gdata->proxies_generated; + JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "proxies", subval); + + DL_COUNT(gdata->dead_proxies, proxy, objects); + memsize = sizeof(proxy_instance_t) * objects; + JSON_CPACK(subval, "{si,sI}", "count", objects, "memory", memsize); + json_set_object(val, "dead_proxies", subval); + + total_objects = memsize = 0; + for (proxy = gdata->proxies; proxy; proxy=proxy->hh.next) { + mutex_lock(&proxy->proxy_lock); + total_objects += objects = HASH_COUNT(proxy->subproxies); + memsize += SAFE_HASH_OVERHEAD(proxy->subproxies) + sizeof(proxy_instance_t) * objects; + mutex_unlock(&proxy->proxy_lock); + } + generated = gdata->subproxies_generated; + mutex_unlock(&gdata->lock); + + JSON_CPACK(subval, "{si,sI,sI}", "count", total_objects, "memory", memsize, "generated", generated); + json_set_object(val, "subproxies", subval); 
+ + mutex_lock(&gdata->notify_lock); + objects = HASH_COUNT(gdata->notify_instances); + memsize = SAFE_HASH_OVERHEAD(gdata->notify_instances) + sizeof(notify_instance_t) * objects; + generated = gdata->proxy_notify_id; + mutex_unlock(&gdata->notify_lock); + + JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "notifies", subval); + + mutex_lock(&gdata->share_lock); + objects = HASH_COUNT(gdata->shares); + memsize = SAFE_HASH_OVERHEAD(gdata->shares) + sizeof(share_msg_t) * objects; + generated = gdata->share_id; + mutex_unlock(&gdata->share_lock); + + JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "shares", subval); + + mutex_lock(&gdata->psend_lock); + DL_COUNT(gdata->psends, msg, objects); + generated = gdata->psends_generated; + mutex_unlock(&gdata->psend_lock); + + memsize = sizeof(stratum_msg_t) * objects; + JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "psends", subval); + + send_api_response(val, sockd); +} + +/* Entered with parent proxy locked */ +static json_t *__proxystats(proxy_instance_t *proxy, proxy_instance_t *parent, bool discrete) +{ + json_t *val = json_object(); + + /* Opportunity to update hashrate just before we report it without + * needing to check on idle proxies regularly */ + __decay_proxy(proxy, parent, 0); + + json_set_int(val, "id", proxy->id); + json_set_int(val, "userid", proxy->userid); + json_set_string(val, "baseurl", proxy->baseurl); + json_set_string(val, "url", proxy->url); + json_set_string(val, "auth", proxy->auth); + json_set_string(val, "pass", proxy->pass); + json_set_string(val, "enonce1", proxy->enonce1 ? 
proxy->enonce1 : ""); + json_set_int(val, "nonce1len", proxy->nonce1len); + json_set_int(val, "nonce2len", proxy->nonce2len); + json_set_double(val, "diff", proxy->diff); + if (parent_proxy(proxy)) { + json_set_double(val, "total_accepted", proxy->total_accepted); + json_set_double(val, "total_rejected", proxy->total_rejected); + json_set_int(val, "subproxies", proxy->subproxy_count); + json_set_double(val, "tdsps1", proxy->tdsps1); + json_set_double(val, "tdsps5", proxy->tdsps5); + json_set_double(val, "tdsps60", proxy->tdsps60); + json_set_double(val, "tdsps1440", proxy->tdsps1440); + } + if (discrete) { + json_set_double(val, "dsps1", proxy->dsps1); + json_set_double(val, "dsps5", proxy->dsps5); + json_set_double(val, "dsps60", proxy->dsps60); + json_set_double(val, "dsps1440", proxy->dsps1440); + json_set_double(val, "accepted", proxy->diff_accepted); + json_set_double(val, "rejected", proxy->diff_rejected); + } + json_set_string(val, "connect", proxy_status[parent->connect_status]); + json_set_string(val, "subscribe", proxy_status[parent->subscribe_status]); + json_set_string(val, "authorise", proxy_status[parent->auth_status]); + json_set_int(val, "backoff", parent->backoff); + json_set_int(val, "lastshare", proxy->last_share.tv_sec); + json_set_bool(val, "global", proxy->global); + json_set_bool(val, "disabled", proxy->disabled); + json_set_bool(val, "alive", proxy->alive); + json_set_int(val, "maxclients", proxy->clients_per_proxy); + + return val; +} + +static json_t *proxystats(proxy_instance_t *proxy, bool discrete) +{ + proxy_instance_t *parent = proxy->parent; + json_t *val; + + mutex_lock(&parent->proxy_lock); + val = __proxystats(proxy, parent, discrete); + mutex_unlock(&parent->proxy_lock); + + return val; +} + +static json_t *all_proxystats(gdata_t *gdata) +{ + json_t *res, *arr_val = json_array(); + proxy_instance_t *proxy, *tmp; + + mutex_lock(&gdata->lock); + HASH_ITER(hh, gdata->proxies, proxy, tmp) { + mutex_unlock(&gdata->lock); + 
json_array_append_new(arr_val, proxystats(proxy, false)); + mutex_lock(&gdata->lock); + } + mutex_unlock(&gdata->lock); + + JSON_CPACK(res, "{so}", "proxy", arr_val); + return res; +} + +static void parse_proxystats(gdata_t *gdata, const int sockd, const char *buf) +{ + json_t *val = NULL, *res = NULL; + proxy_instance_t *proxy; + json_error_t err_val; + bool totals = false; + int id, subid = 0; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = all_proxystats(gdata); + goto out_noval; + } + if (!json_get_int(&id, val, "id")) { + res = all_proxystats(gdata); + goto out; + } + if (!json_get_int(&subid, val, "subid")) + totals = true; + proxy = proxy_by_id(gdata, id); + if (!proxy) { + res = json_errormsg("Proxy id %d not found", id); + goto out; + } + if (!totals) + proxy = subproxy_by_id(proxy, subid); + if (!proxy) { + res = json_errormsg("Proxy id %d:%d not found", id, subid); + goto out; + } + res = proxystats(proxy, true); +out: + json_decref(val); +out_noval: + send_api_response(res, sockd); +} + +static void send_subproxystats(gdata_t *gdata, const int sockd) +{ + json_t *res, *arr_val = json_array(); + proxy_instance_t *parent, *tmp; + + mutex_lock(&gdata->lock); + HASH_ITER(hh, gdata->proxies, parent, tmp) { + json_t *val, *subarr_val = json_array(); + proxy_instance_t *subproxy, *subtmp; + + mutex_unlock(&gdata->lock); + + mutex_lock(&parent->proxy_lock); + HASH_ITER(sh, parent->subproxies, subproxy, subtmp) { + val = __proxystats(subproxy, parent, true); + json_set_int(val, "subid", subproxy->subid); + json_array_append_new(subarr_val, val); + } + mutex_unlock(&parent->proxy_lock); + + JSON_CPACK(val, "{si,so}", + "id", parent->id, + "subproxy", subarr_val); + json_array_append_new(arr_val, val); + mutex_lock(&gdata->lock); + } + mutex_unlock(&gdata->lock); + + JSON_CPACK(res, "{so}", "proxy", arr_val); + send_api_response(res, sockd); +} + +static void parse_globaluser(ckpool_t *ckp, gdata_t *gdata, const char *buf) +{ + char *url, 
*username, *pass = strdupa(buf); + int userid = -1, proxyid = -1; + proxy_instance_t *proxy, *tmp; + int64_t clientid = -1; + bool found = false; + + sscanf(buf, "%d:%d:%"PRId64":%s", &proxyid, &userid, &clientid, pass); + if (unlikely(clientid < 0 || userid < 0 || proxyid < 0)) { + LOGWARNING("Failed to parse_globaluser ids from command %s", buf); + return; + } + username = strsep(&pass, ","); + if (unlikely(!username)) { + LOGWARNING("Failed to parse_globaluser username from command %s", buf); + return; + } + + LOGDEBUG("Checking userproxy proxy %d user %d:%"PRId64" worker %s pass %s", + proxyid, userid, clientid, username, pass); + + if (unlikely(proxyid >= ckp->proxies)) { + LOGWARNING("Trying to find non-existent proxy id %d in parse_globaluser", proxyid); + return; + } + + mutex_lock(&gdata->lock); + url = ckp->proxyurl[proxyid]; + HASH_ITER(hh, gdata->proxies, proxy, tmp) { + if (!strcmp(proxy->auth, username)) { + found = true; + break; + } + } + mutex_unlock(&gdata->lock); + + if (found) + return; + add_userproxy(ckp, gdata, userid, url, username, pass); +} + +static void proxy_loop(proc_instance_t *pi) +{ + proxy_instance_t *proxi = NULL, *cproxy; + server_instance_t *si = NULL, *old_si; + ckpool_t *ckp = pi->ckp; + gdata_t *gdata = ckp->gdata; + unix_msg_t *umsg = NULL; + connsock_t *cs = NULL; + char *buf = NULL; + +reconnect: + clear_unix_msg(&umsg); + + if (ckp->node) { + old_si = si; + si = live_server(ckp, gdata); + if (!si) + goto out; + cs = &si->cs; + if (!old_si) + LOGWARNING("Connected to bitcoind: %s:%s", cs->url, cs->port); + else if (si != old_si) + LOGWARNING("Failed over to bitcoind: %s:%s", cs->url, cs->port); + } + + /* This does not necessarily mean we reconnect, but a change has + * occurred and we need to reexamine the proxies. 
*/ + cproxy = wait_best_proxy(ckp, gdata); + if (!cproxy) + goto out; + if (proxi != cproxy) { + gdata->current_proxy = proxi = cproxy; + LOGWARNING("Successfully connected to pool %d %s as proxy%s", + proxi->id, proxi->url, ckp->passthrough ? " in passthrough mode" : ""); + } + + if (unlikely(!ckp->generator_ready)) { + ckp->generator_ready = true; + LOGWARNING("%s generator ready", ckp->name); + } +retry: + clear_unix_msg(&umsg); + do { + umsg = get_unix_msg(pi); + } while (!umsg); + + buf = umsg->buf; + LOGDEBUG("Proxy received request: %s", buf); + if (cmdmatch(buf, "stats")) { + send_stats(gdata, umsg->sockd); + } else if (cmdmatch(buf, "list")) { + send_list(gdata, umsg->sockd); + } else if (cmdmatch(buf, "sublist")) { + send_sublist(gdata, umsg->sockd, buf + 8); + } else if (cmdmatch(buf, "addproxy")) { + parse_addproxy(ckp, gdata, umsg->sockd, buf + 9); + } else if (cmdmatch(buf, "delproxy")) { + parse_delproxy(ckp, gdata, umsg->sockd, buf + 9); + } else if (cmdmatch(buf, "enableproxy")) { + parse_ableproxy(gdata, umsg->sockd, buf + 12, false); + } else if (cmdmatch(buf, "disableproxy")) { + parse_ableproxy(gdata, umsg->sockd, buf + 13, true); + } else if (cmdmatch(buf, "proxystats")) { + parse_proxystats(gdata, umsg->sockd, buf + 11); + } else if (cmdmatch(buf, "subproxystats")) { + send_subproxystats(gdata, umsg->sockd); + } else if (cmdmatch(buf, "globaluser")) { + parse_globaluser(ckp, gdata, buf + 11); + } else if (cmdmatch(buf, "reconnect")) { + goto reconnect; + } else if (cmdmatch(buf, "submitblock:")) { + char blockmsg[80]; + bool ret; + + LOGNOTICE("Submitting likely block solve share from upstream pool"); + ret = submit_block(cs, buf + 12 + 64 + 1); + memset(buf + 12 + 64, 0, 1); + sprintf(blockmsg, "%sblock:%s", ret ? 
"" : "no", buf + 12); + send_proc(ckp->stratifier, blockmsg); + } else if (cmdmatch(buf, "submittxn:")) { + if (unlikely(strlen(buf) < 11)) { + LOGWARNING("Got zero length submittxn"); + goto retry; + } + submit_txn(cs, buf + 10); + } else if (cmdmatch(buf, "loglevel")) { + sscanf(buf, "loglevel=%d", &ckp->loglevel); + } else if (cmdmatch(buf, "ping")) { + LOGDEBUG("Proxy received ping request"); + send_unix_msg(umsg->sockd, "pong"); + } else if (cmdmatch(buf, "recruit")) { + recruit_subproxy(gdata, buf); + } else if (cmdmatch(buf, "dropproxy")) { + drop_proxy(gdata, buf); + } else { + LOGWARNING("Generator received unrecognised message: %s", buf); + } + goto retry; +out: + return; +} + +/* Check which servers are alive, maintaining a connection with them and + * reconnect if a higher priority one is available. */ +static void *server_watchdog(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + gdata_t *gdata = ckp->gdata; + + rename_proc("swatchdog"); + + pthread_detach(pthread_self()); + + while (42) { + server_instance_t *best = NULL; + ts_t timer_t; + int i; + + cksleep_prepare_r(&timer_t); + for (i = 0; i < ckp->btcds; i++) { + server_instance_t *si = ckp->servers[i]; + + /* Have we reached the current server? 
*/ + if (server_alive(ckp, si, true) && !best) + best = si; + } + if (best && best != gdata->current_si) + send_proc(ckp->generator, "reconnect"); + cksleep_ms_r(&timer_t, 5000); + } + return NULL; +} + +static void setup_servers(ckpool_t *ckp) +{ + pthread_t pth_watchdog; + int i; + + ckp->servers = ckalloc(sizeof(server_instance_t *) * ckp->btcds); + for (i = 0; i < ckp->btcds; i++) { + server_instance_t *si; + connsock_t *cs; + + ckp->servers[i] = ckzalloc(sizeof(server_instance_t)); + si = ckp->servers[i]; + si->url = ckp->btcdurl[i]; + si->auth = ckp->btcdauth[i]; + si->pass = ckp->btcdpass[i]; + si->notify = ckp->btcdnotify[i]; + si->id = i; + cs = &si->cs; + cs->ckp = ckp; + cksem_init(&cs->sem); + cksem_post(&cs->sem); + } + + create_pthread(&pth_watchdog, server_watchdog, ckp); +} + +static void server_mode(ckpool_t *ckp, proc_instance_t *pi) +{ + int i; + + setup_servers(ckp); + + gen_loop(pi); + + for (i = 0; i < ckp->btcds; i++) { + server_instance_t *si = ckp->servers[i]; + + kill_server(si); + dealloc(si); + } + dealloc(ckp->servers); +} + +static proxy_instance_t *__add_proxy(ckpool_t *ckp, gdata_t *gdata, const int id) +{ + proxy_instance_t *proxy; + + gdata->proxies_generated++; + proxy = ckzalloc(sizeof(proxy_instance_t)); + proxy->id = id; + proxy->url = strdup(ckp->proxyurl[id]); + proxy->baseurl = strdup(proxy->url); + proxy->auth = strdup(ckp->proxyauth[id]); + if (ckp->proxypass[id]) + proxy->pass = strdup(ckp->proxypass[id]); + else + proxy->pass = strdup(""); + proxy->ckp = proxy->cs.ckp = ckp; + HASH_ADD_INT(gdata->proxies, id, proxy); + proxy->global = true; + cksem_init(&proxy->cs.sem); + cksem_post(&proxy->cs.sem); + return proxy; +} + +static void proxy_mode(ckpool_t *ckp, proc_instance_t *pi) +{ + gdata_t *gdata = ckp->gdata; + proxy_instance_t *proxy; + int i; + + mutex_init(&gdata->lock); + mutex_init(&gdata->notify_lock); + mutex_init(&gdata->share_lock); + + if (ckp->node) + setup_servers(ckp); + + /* Create all our proxy 
structures and pointers */ + for (i = 0; i < ckp->proxies; i++) { + proxy = __add_proxy(ckp, gdata, i); + if (ckp->passthrough) { + create_pthread(&proxy->pth_precv, passthrough_recv, proxy); + proxy->passsends = create_ckmsgq(ckp, "passsend", &passthrough_send); + } else { + mutex_init(&gdata->psend_lock); + cond_init(&gdata->psend_cond); + prepare_proxy(proxy); + create_pthread(&gdata->pth_uprecv, userproxy_recv, ckp); + create_pthread(&gdata->pth_psend, proxy_send, ckp); + } + } + + proxy_loop(pi); +} + +void *generator(void *arg) +{ + proc_instance_t *pi = (proc_instance_t *)arg; + ckpool_t *ckp = pi->ckp; + gdata_t *gdata; + + rename_proc(pi->processname); + LOGWARNING("%s generator starting", ckp->name); + gdata = ckzalloc(sizeof(gdata_t)); + ckp->gdata = gdata; + gdata->ckp = ckp; + + if (ckp->proxy) { + /* Wait for the stratifier to be ready for us */ + while (!ckp->stratifier_ready) + cksleep_ms(10); + proxy_mode(ckp, pi); + } else + server_mode(ckp, pi); + /* We should never get here unless there's a fatal error */ + LOGEMERG("Generator failure, shutting down"); + exit(1); + return NULL; +} diff --git a/solo-ckpool-source/src/generator.h b/solo-ckpool-source/src/generator.h new file mode 100644 index 0000000..4331956 --- /dev/null +++ b/solo-ckpool-source/src/generator.h @@ -0,0 +1,30 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#ifndef GENERATOR_H +#define GENERATOR_H + +#include "config.h" + +#define GETBEST_FAILED -1 +#define GETBEST_NOTIFY 0 +#define GETBEST_SUCCESS 1 + +void generator_add_send(ckpool_t *ckp, json_t *val); +struct genwork *generator_getbase(ckpool_t *ckp); +int generator_getbest(ckpool_t *ckp, char *hash); +bool generator_checkaddr(ckpool_t *ckp, const char *addr, bool *script, bool *segwit); +bool generator_checktxn(const ckpool_t *ckp, const char *txn, json_t **val); +char *generator_get_txn(ckpool_t *ckp, const char *hash); +bool generator_submitblock(ckpool_t *ckp, const char *buf); +void generator_preciousblock(ckpool_t *ckp, const char *hash); +bool generator_get_blockhash(ckpool_t *ckp, int height, char *hash); +void *generator(void *arg); + +#endif /* GENERATOR_H */ diff --git a/solo-ckpool-source/src/jansson-2.14/CHANGES b/solo-ckpool-source/src/jansson-2.14/CHANGES new file mode 100644 index 0000000..cb6ff07 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/CHANGES @@ -0,0 +1,986 @@ +Version 2.14 +============ + +Released 2021-09-09 + +* New Features: + + - Add `json_object_getn`, `json_object_setn`, `json_object_deln`, and the + corresponding `nocheck` functions. (#520, by Maxim Zhukov) + +* Fixes: + + - Handle `sprintf` corner cases (#537, by Tobias Stoeckmann) + +* Build: + + - Symbol versioning for all exported symbols (#540, by Simon McVittie) + - Fix compiler warnings (#555, by Kelvin Lee) + +* Documentation: + + - Small fixes (#544, #546, by @i-ky) + - Sphinx 3 compatibility (#543, by Pierce Lopez) + + +Version 2.13.1 +============== + +Released 2020-05-07 + +* Build: + + - Include `jansson_version_str()` and `jansson_version_cmp()` in + shared library. (#534) + + - Include ``scripts/`` in tarball. (#535) + + +Version 2.13 +============ + +Released 2020-05-05 + +* New Features: + + - Add `jansson_version_str()` and `jansson_version_cmp()` for runtime + version checking (#465). 
+ + - Add `json_object_update_new()`, `json_object_update_existing_new()` + and `json_object_update_missing_new()` functions (#499). + + - Add `json_object_update_recursive()` (#505). + +* Build: + + - Add ``-Wno-format-truncation`` to suppress format truncation warnings (#489). + +* Bug fixes: + + - Remove ``strtod`` macro definition for MinGW (#498). + + - Add infinite loop check in `json_deep_copy()` (#490). + + - Add ``pipe`` macro definition for MinGW (#500). + + - Enhance ``JANSSON_ATTRS`` macro to support earlier C standard(C89) (#501). + + - Update version detection for sphinx-build (#502). + +* Documentation: + + - Fix typos (#483, #494). + + - Document that call the custom free function to free the return value + of `json_dumps()` if you have a custom malloc/free (#490). + + - Add vcpkg installation instructions (#496). + + - Document that non-blocking file descriptor is not supported on + `json_loadfd()` (#503). + + +Version 2.12 +============ + +Released 2018-11-26 + +* Bug fixes: + + - Fix error message in `json_pack()` for NULL object (#409). + + - Avoid invalid memory read in `json_pack()` (#421). + + - Call va_end after va_copy in `json_vsprintf()` (#427). + + - Improve handling of formats with '?' and '*' in `json_pack()` (#438). + + - Remove inappropriate `jsonp_free()` which caused segmentation fault in + error handling (#444). + +* Build: + + - Add function attributes for GCC and CLANG to provide warnings on improper + use of jansson routines (#404). + + - Many CMake fixes (#408, #412, #415). + + - Enable -Bsymbolic-functions linker flag whenever possible. + + - Resolve various compiler warnings (#423, #430, #435, #436). + + - Fix code coverage ignored paths (#439). + +* Other: + + - Test coverage improvements (#398, #400). + + - Add VS 2017 to appveyor, update Visual Studio documentation (#417). + + - Update copyright for 2018 (#424). + + - Update install instructions in README (#401). 
+ +Version 2.11 +============ + +Released 2018-02-09 + +* New features: + + - Add `json_pack()` format specifiers s*, o* and O* for values that + can be omitted if null (#339). + + - Add `json_error_code()` to retrieve numeric error codes (#365, #380, + #381). + + - Enable thread safety for `json_dump()` on all systems. Enable thread + safe `json_decref()` and `json_incref()` for modern compilers (#389). + + - Add `json_sprintf()` and `json_vsprintf()` (#393). + +* Bug Fixes: + + - Fix incorrect report of success from `json_dump_file()` when an error + is returned by `fclose()` (#359). + + - Make json_equal() const-correct (#344). + + - Fix incomplete stealing of references by `json_pack()` (#374). + +* Build: + + - Work around gcc's -Wimplicit-fallthrough. + + - Fix CMake detection of ``sys/types.h`` header (#375). + + - Fix `jansson.pc` generated by CMake to be more consistent with the one + generated using GNU Autotools (#368). + +* Other: + + - Miscellaneous documentation fixes (#356, #378, #395). + + - Remove unnecessary reference actions from parsers (#377). + +Version 2.10 +============ + +Released 2017-03-02 + +* New features: + + - Add JSON_EMBED encoding flag allowing arrays and objects to be encoded + into existing streams (#329). + + - Add `json_dumpb()` function for dumping to a pre-allocated buffer (#328). + + - Add `json_dumpfd()` and `json_loadfd()` functions for dumping to streaming + file descriptors (#328). + + - Add support for parsing buffers larger than 2GB (#309). + +* Build: + + - Fix CMake build when LONG_LONG_INT is defined as "" (#321) + +* Other: + + - Internal code cleanup (#311, #314) + +Version 2.9 +=========== + +Released 2016-09-18 + +* New features: + + - Add ``json_auto_t`` to automatically decref a value that goes out + of scope. Available only on GCC and Clang. 
(#301) + +* Build: + + - Fix CMake build (at least on Linux) by removing conflicting + jansson_config.h from the distribution (#306) + + - Change CMake install target generation to be optional (#305) + +* Documentation: + + - Small documentation fixes. + + +Version 2.8 +=========== + +Released 2016-08-30 + +* New features: + + - Always preserve insertion order of object items. + `json_object_iter()` and friends, `json_object_foreach()` and + `json_dumps()` and friends now always work in the insertion order of + object items (#293). + + - Add `json_object_foreach_safe()` macro that allows + `json_object_del()` calls during iteration (#230). + + - Add `json_get_alloc_funcs()` to allow reading the allocation + functions set by `json_set_alloc_funcs()` (#262, #264). + + - Add `json_pack()` format specifiers s?, o? and O? for values that + can be null (#261, #270). + +* Bug fixes: + + - Fix a crash when parsing inputs consisting of very deeply nested + arrays or objects (#282, #284). + + - Never convert numbers to integers in the parser when + JSON_DECODE_INT_AS_REAL is set. This fixes error messages for + overflowing numbers when JSON_DECODE_INT_AS_REAL is set (#212). + + - Fix a use-after-free in `json_pack()` error handling. + + - Fix subnormal number parsing on mingw32. + + - Handle out-of-memory situations gracefully in the hashtable + implementation (#298). + +* Build: + + - Fix build with CMake on all versions of Visual Studio up to 2015 + (#262, #289). + + - Fix pkgconfig libdir when using CMake (#268). + + - Fix CMake config for static CRT builds on Windows (#206). + + - Fix warnings on LLVM 6.0 targeting iOS arm64 (#208). + + - Add coverlls.io support via Travis for a nice test coverage badge + (#211). + + - Don't expect ``jansson_config.h`` to be in the compiler's include + path (#209). + + - Add a build-time option to set initial hashtable size (#213). 
+ + - Use snprintf and strncpy in place of sprintf and strcpy to silence + linker warnings on OpenBSD (#233). + +* Documentation: + + - Fix various typos in documentation, and a broken link (#258). + + - Add an example program in ``examples/`` (#214, #217). + + - Fix building of documentation man pages (#207). + + - Document the fact that copying objects doesn't preserve the + insertion order of keys (#237). + +* Tests: + + - Don't use the nonstandard __FUNCTION__ macro in tests. + + - Use expr instead of $((...)) in shell scripts for Solaris 10 + compatibility. + + - Disable Visual Studio warning C4756 when triggered deliberately in + tests (#216). + + - Other minor fixes (#221, #248). + +* Other changes: + + - List all unrecognized object keys when strict unpacking fails + (#263). + + - Alter the order of the members of the hashtable_pair struct for + easier debugging. + + - Minor performance improvement to `json_dump()` and friends (#234). + + - Minor style fixes (#255, #257). + + +Version 2.7 +=========== + +Released 2014-10-02 + +* New features: + + - `json_pack()` and friends: Add format specifiers ``s%`` and ``+%`` + for a size_t string length (#141). + + - `json_unpack()` and friends: Add format specifier ``s%`` for + unpacking the string length along with the string itself (#141). + + - Add length-aware string constructors `json_stringn()` and + `json_stringn_nocheck()`, length-aware string mutators + `json_string_setn()` and `json_string_setn_nocheck()`, and a + function for getting string's length `json_string_length()` (#141, + #143). + + - Support ``\u0000`` escapes in the decoder. The support can be + enabled by using the ``JSON_ALLOW_NUL`` decoding flag (#141). + + - Add `json_boolean_value()` as an alias for `json_is_true()` + (#146). + + - Add JSON_REAL_PRECISION encoding flag/macro for controlling real + number precision (#178). + + - Define the maximum indentation as JSON_MAX_INDENT (#191). 
+ +* Bug fixes: + + - Some malformed ``\uNNNN`` escapes could crash the decoder with an + assertion failure. + + - Avoid integer overflows with very long strings in UTF-8 decoder and + hashtable. + + - Check for *NULL* key in `json_object_get()` and + `json_object_del()` (#151). + + - Enhance hashtable seeding on Windows (#162). + + - `json_unpack()`: Allow mixing JSON_STRICT with optional keys + (#162, #163). + + - Fix int/int32 mismatch (#142). + + - Parse subnormal numbers correctly (#202). + +* Build: + + - Remove VS2010 build files. CMake should be used on Windows instead + (#165). + + - Fix CMake build flags for MinGW (#193). + + - Add CMake config files for find_package. Rename config.h to + jansson_private_config.h (#157, #159). + + - Make Valgrind checks work with CMake (#160). + + - Fix feature checks to use correct __ATOMIC flags. + + - Fix CMake checks for uint16_t and uint8_t support (#177). + + - Make Jansson build on SmartOS/Solaris (#171). + + - Work around a GCC bug on Solaris (#175). + + - Fix autoreconf on Debian (#182). + + - Don't use GNU make specific export for global AM_CFLAGS (#203, + #204). + + - Fix building on Android using the supplied Android.mk (#166, + #174). + + - Android.mk: Add -DHAVE_STDINT_H to LOCAL_CFLAGS (#200). + +* Documentation: + + - Document JANSSON_BUILD_SHARED_LIBS CMake option (#187). + +* Tests: + + - Close file handles correctly (#198). + +* Other changes: + + - ``\uNNNN`` escapes are now encoded in upper case for better + readability. + + - Enable usage of AddressSanitizer (#180). + + +Version 2.6 +=========== + +Released 2014-02-11 + +* Security: + + - CVE-2013-6401: The hash function used by the hashtable + implementation has been changed, and is automatically seeded with + random data when the first JSON object is created. This prevents + an attacker from causing large JSON objects with specially crafted + keys perform poorly. 
+ +* New features: + + - `json_object_seed()`: Set the seed value of the hash function. + +* Bug fixes: + + - Include CMake specific files in the release tarball. + +* Documentation: + + - Fix tutorial source to send a User-Agent header, which is now + required by the GitHub API. + + - Set all memory to zero in secure_free() example. + + +Version 2.5 +=========== + +Released 2013-09-19 + +* New features: + + - `json_pack()` and friends: Add format specifiers ``s#``, ``+`` and + ``+#``. + + - Add ``JSON_DECODE_INT_AS_REAL`` decoding flag to treat all numbers + as real in the decoder (#123). + + - Add `json_array_foreach()`, paralleling `json_object_foreach()` + (#118). + +* Bug fixes: + + - `json_dumps()` and friends: Don't crash if json is *NULL* and + ``JSON_ENCODE_ANY`` is set. + + - Fix a theoretical integer overflow in `jsonp_strdup()`. + + - Fix `l_isxdigit()` macro (#97). + + - Fix an off-by-one error in `json_array_remove()`. + +* Build: + + - Support CMake in addition to GNU Autotools (#106, #107, #112, + #115, #120, #127). + + - Support building for Android (#109). + + - Don't use ``-Werror`` by default. + + - Support building and testing with VPATH (#93). + + - Fix compilation when ``NDEBUG`` is defined (#128) + +* Tests: + + - Fix a refleak in ``test/bin/json_process.c``. + +* Documentation: + + - Clarify the return value of `json_load_callback_t`. + + - Document how to circumvent problems with separate heaps on Windows. + + - Fix memory leaks and warnings in ``github_commits.c``. + + - Use `json_decref()` properly in tutorial. + +* Other: + + - Make it possible to forward declare ``struct json_t``. + + +Version 2.4 +=========== + +Released 2012-09-23 + +* New features: + + - Add `json_boolean()` macro that returns the JSON true or false + value based on its argument (#86). + + - Add `json_load_callback()` that calls a callback function + repeatedly to read the JSON input (#57). 
+ + - Add JSON_ESCAPE_SLASH encoding flag to escape all occurences of + ``/`` with ``\/``. + +* Bug fixes: + + - Check for and reject NaN and Inf values for reals. Encoding these + values resulted in invalid JSON. + + - Fix `json_real_set()` to return -1 on error. + +* Build: + + - Jansson now builds on Windows with Visual Studio 2010, and + includes solution and project files in ``win32/vs2010/`` + directory. + + - Fix build warnings (#77, #78). + + - Add ``-no-undefined`` to LDFLAGS (#90). + +* Tests: + + - Fix the symbol exports test on Linux/PPC64 (#88). + +* Documentation: + + - Fix typos (#73, #84). + + +Version 2.3.1 +============= + +Released 2012-04-20 + +* Build issues: + + - Only use ``long long`` if ``strtoll()`` is also available. + +* Documentation: + + - Fix the names of library version constants in documentation. (#52) + + - Change the tutorial to use GitHub API v3. (#65) + +* Tests: + + - Make some tests locale independent. (#51) + + - Distribute the library exports test in the tarball. + + - Make test run on shells that don't support the ``export FOO=bar`` + syntax. + + +Version 2.3 +=========== + +Released 2012-01-27 + +* New features: + + - `json_unpack()` and friends: Add support for optional object keys + with the ``{s?o}`` syntax. + + - Add `json_object_update_existing()` and + `json_object_update_missing()`, for updating only existing keys or + only adding missing keys to an object. (#37) + + - Add `json_object_foreach()` for more convenient iteration over + objects. (#45, #46) + + - When decoding JSON, write the number of bytes that were read from + input to ``error.position`` also on success. This is handy with + ``JSON_DISABLE_EOF_CHECK``. + + - Add support for decoding any JSON value, not just arrays or + objects. The support is enabled with the new ``JSON_DECODE_ANY`` + flag. Patch by Andrea Marchesini. (#4) + +* Bug fixes + + - Avoid problems with object's serial number growing too big. 
(#40, + #41) + + - Decoding functions now return NULL if the first argument is NULL. + Patch by Andrea Marchesini. + + - Include ``jansson_config.h.win32`` in the distribution tarball. + + - Remove ``+`` and leading zeros from exponents in the encoder. + (#39) + + - Make Jansson build and work on MinGW. (#39, #38) + +* Documentation + + - Note that the same JSON values must not be encoded in parallel by + separate threads. (#42) + + - Document MinGW support. + + +Version 2.2.1 +============= + +Released 2011-10-06 + +* Bug fixes: + + - Fix real number encoding and decoding under non-C locales. (#32) + + - Fix identifier decoding under non-UTF-8 locales. (#35) + + - `json_load_file()`: Open the input file in binary mode for maximum + compatibility. + +* Documentation: + + - Clarify the lifecycle of the result of the ``s`` format of + `json_unpack()`. (#31) + + - Add some portability info. (#36) + + - Little clarifications here and there. + +* Other: + + - Some style fixes, issues detected by static analyzers. + + +Version 2.2 +=========== + +Released 2011-09-03 + +* New features: + + - `json_dump_callback()`: Pass the encoder output to a callback + function in chunks. + +* Bug fixes: + + - `json_string_set()`: Check that target is a string and value is + not NULL. + +* Other: + + - Documentation typo fixes and clarifications. + + +Version 2.1 +=========== + +Released 2011-06-10 + +* New features: + + - `json_loadb()`: Decode a string with a given size, useful if the + string is not null terminated. + + - Add ``JSON_ENCODE_ANY`` encoding flag to allow encoding any JSON + value. By default, only arrays and objects can be encoded. (#19) + + - Add ``JSON_REJECT_DUPLICATES`` decoding flag to issue a decoding + error if any JSON object in the input contains duplicate keys. (#3) + + - Add ``JSON_DISABLE_EOF_CHECK`` decoding flag to stop decoding after a + valid JSON input. This allows other data after the JSON data. 
+ +* Bug fixes: + + - Fix an additional memory leak when memory allocation fails in + `json_object_set()` and friends. + + - Clear errno before calling `strtod()` for better portability. (#27) + +* Building: + + - Avoid set-but-not-used warning/error in a test. (#20) + +* Other: + + - Minor clarifications to documentation. + + +Version 2.0.1 +============= + +Released 2011-03-31 + +* Bug fixes: + + - Replace a few `malloc()` and `free()` calls with their + counterparts that support custom memory management. + + - Fix object key hashing in json_unpack() strict checking mode. + + - Fix the parentheses in ``JANSSON_VERSION_HEX`` macro. + + - Fix `json_object_size()` return value. + + - Fix a few compilation issues. + +* Portability: + + - Enhance portability of `va_copy()`. + + - Test framework portability enhancements. + +* Documentation: + + - Distribute ``doc/upgrading.rst`` with the source tarball. + + - Build documentation in strict mode in ``make distcheck``. + + +Version 2.0 +=========== + +Released 2011-02-28 + +This release is backwards incompatible with the 1.x release series. +See the chapter "Upgrading from older versions" in documentation for +details. + +* Backwards incompatible changes: + + - Unify unsigned integer usage in the API: All occurrences of + unsigned int and unsigned long have been replaced with size_t. + + - Change JSON integer's underlying type to the widest signed integer + type available, i.e. long long if it's supported, otherwise long. + Add a typedef json_int_t that defines the type. + + - Change the maximum indentation depth to 31 spaces in encoder. This + frees up bits from the flags parameter of encoding functions + `json_dumpf()`, `json_dumps()` and `json_dump_file()`. + + - For future needs, add a flags parameter to all decoding functions + `json_loadf()`, `json_loads()` and `json_load_file()`. + +* New features + + - `json_pack()`, `json_pack_ex()`, `json_vpack_ex()`: Create JSON + values based on a format string. 
+ + - `json_unpack()`, `json_unpack_ex()`, `json_vunpack_ex()`: Simple + value extraction and validation functionality based on a format + string. + + - Add column, position and source fields to the ``json_error_t`` + struct. + + - Enhance error reporting in the decoder. + + - ``JANSSON_VERSION`` et al.: Preprocessor constants that define the + library version. + + - `json_set_alloc_funcs()`: Set custom memory allocation functions. + +* Fix many portability issues, especially on Windows. + +* Configuration + + - Add file ``jansson_config.h`` that contains site specific + configuration. It's created automatically by the configure script, + or can be created by hand if the configure script cannot be used. + The file ``jansson_config.h.win32`` can be used without + modifications on Windows systems. + + - Add a section to documentation describing how to build Jansson on + Windows. + + - Documentation now requires Sphinx 1.0 or newer. + + +Version 1.3 +=========== + +Released 2010-06-13 + +* New functions: + + - `json_object_iter_set()`, `json_object_iter_set_new()`: Change + object contents while iterating over it. + + - `json_object_iter_at()`: Return an iterator that points to a + specific object item. + +* New encoding flags: + + - ``JSON_PRESERVE_ORDER``: Preserve the insertion order of object + keys. 
+ +* Bug fixes: + + - Fix an error that occurred when an array or object was first + encoded as empty, then populated with some data, and then + re-encoded + + - Fix the situation like above, but when the first encoding resulted + in an error + +* Documentation: + + - Clarify the documentation on reference stealing, providing an + example usage pattern + + +Version 1.2.1 +============= + +Released 2010-04-03 + +* Bug fixes: + + - Fix reference counting on ``true``, ``false`` and ``null`` + - Estimate real number underflows in decoder with 0.0 instead of + issuing an error + +* Portability: + + - Make ``int32_t`` available on all systems + - Support compilers that don't have the ``inline`` keyword + - Require Autoconf 2.60 (for ``int32_t``) + +* Tests: + + - Print test names correctly when ``VERBOSE=1`` + - ``test/suites/api``: Fail when a test fails + - Enhance tests for iterators + - Enhance tests for decoding texts that contain null bytes + +* Documentation: + + - Don't remove ``changes.rst`` in ``make clean`` + - Add a chapter on RFC conformance + + +Version 1.2 +=========== + +Released 2010-01-21 + +* New functions: + + - `json_equal()`: Test whether two JSON values are equal + - `json_copy()` and `json_deep_copy()`: Make shallow and deep copies + of JSON values + - Add a version of all functions taking a string argument that + doesn't check for valid UTF-8: `json_string_nocheck()`, + `json_string_set_nocheck()`, `json_object_set_nocheck()`, + `json_object_set_new_nocheck()` + +* New encoding flags: + + - ``JSON_SORT_KEYS``: Sort objects by key + - ``JSON_ENSURE_ASCII``: Escape all non-ASCII Unicode characters + - ``JSON_COMPACT``: Use a compact representation with all unneeded + whitespace stripped + +* Bug fixes: + + - Revise and unify whitespace usage in encoder: Add spaces between + array and object items, never append newline to output. 
+ - Remove const qualifier from the ``json_t`` parameter in + `json_string_set()`, `json_integer_set()` and `json_real_set`. + - Use ``int32_t`` internally for representing Unicode code points + (int is not enough on all platforms) + +* Other changes: + + - Convert ``CHANGES`` (this file) to reStructured text and add it to + HTML documentation + - The test system has been refactored. Python is no longer required + to run the tests. + - Documentation can now be built by invoking ``make html`` + - Support for pkg-config + + +Version 1.1.3 +============= + +Released 2009-12-18 + +* Encode reals correctly, so that first encoding and then decoding a + real always produces the same value +* Don't export private symbols in ``libjansson.so`` + + +Version 1.1.2 +============= + +Released 2009-11-08 + +* Fix a bug where an error message was not produced if the input file + could not be opened in `json_load_file()` +* Fix an assertion failure in decoder caused by a minus sign without a + digit after it +* Remove an unneeded include of ``stdint.h`` in ``jansson.h`` + + +Version 1.1.1 +============= + +Released 2009-10-26 + +* All documentation files were not distributed with v1.1; build + documentation in make distcheck to prevent this in the future +* Fix v1.1 release date in ``CHANGES`` + + +Version 1.1 +=========== + +Released 2009-10-20 + +* API additions and improvements: + + - Extend array and object APIs + - Add functions to modify integer, real and string values + - Improve argument validation + - Use unsigned int instead of ``uint32_t`` for encoding flags + +* Enhance documentation + + - Add getting started guide and tutorial + - Fix some typos + - General clarifications and cleanup + +* Check for integer and real overflows and underflows in decoder +* Make singleton values thread-safe (``true``, ``false`` and ``null``) +* Enhance circular reference handling +* Don't define ``-std=c99`` in ``AM_CFLAGS`` +* Add C++ guards to ``jansson.h`` +* Minor performance and 
portability improvements +* Expand test coverage + + +Version 1.0.4 +============= + +Released 2009-10-11 + +* Relax Autoconf version requirement to 2.59 +* Make Jansson compile on platforms where plain ``char`` is unsigned +* Fix API tests for object + + +Version 1.0.3 +============= + +Released 2009-09-14 + +* Check for integer and real overflows and underflows in decoder +* Use the Python json module for tests, or simplejson if the json + module is not found +* Distribute changelog (this file) + + +Version 1.0.2 +============= + +Released 2009-09-08 + +* Handle EOF correctly in decoder + + +Version 1.0.1 +============= + +Released 2009-09-04 + +* Fixed broken `json_is_boolean()` + + +Version 1.0 +=========== + +Released 2009-08-25 + +* Initial release diff --git a/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt b/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt new file mode 100644 index 0000000..39b9ad3 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt @@ -0,0 +1,662 @@ +cmake_minimum_required (VERSION 3.1) +project(jansson C) + +# Options +option(JANSSON_BUILD_SHARED_LIBS "Build shared libraries." OFF) +option(USE_URANDOM "Use /dev/urandom to seed the hash function." ON) +option(USE_WINDOWS_CRYPTOAPI "Use CryptGenRandom to seed the hash function." ON) + +if (MSVC) + # This option must match the settings used in your program, in particular if you + # are linking statically + option(JANSSON_STATIC_CRT "Link the static CRT libraries" OFF ) +endif () + +option(JANSSON_EXAMPLES "Compile example applications" ON) + +if (UNIX) + option(JANSSON_COVERAGE "(GCC Only! Requires gcov/lcov to be installed). Include target for doing coverage analysis for the test suite. Note that -DCMAKE_BUILD_TYPE=Debug must be set" OFF) +endif () + +# Set some nicer output dirs. 
+set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin) +set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib) +set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib) +set(JANSSON_TEMP_DIR ${CMAKE_CURRENT_BINARY_DIR}/tmp) + +# Give the debug version a different postfix for windows, +# so both the debug and release version can be built in the +# same build-tree on Windows (MSVC). +if (WIN32 AND NOT CMAKE_DEBUG_POSTFIX) + set(CMAKE_DEBUG_POSTFIX "_d") +endif() + +# This is how I thought it should go +# set (JANSSON_VERSION "2.3.1") +# set (JANSSON_SOVERSION 2) + +set(JANSSON_DISPLAY_VERSION "2.14") + +# This is what is required to match the same numbers as automake's +set(JANSSON_VERSION "4.14.0") +set(JANSSON_SOVERSION 4) + +# for CheckFunctionKeywords +set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") + +include (CheckCSourceCompiles) +include (CheckFunctionExists) +include (CheckFunctionKeywords) +include (CheckIncludeFiles) +include (CheckTypeSize) + +# suppress format-truncation warning +include (CheckCCompilerFlag) +check_c_compiler_flag(-Wno-format-truncation HAS_NO_FORMAT_TRUNCATION) +if (HAS_NO_FORMAT_TRUNCATION) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-format-truncation") +endif() + +if (MSVC) + # Turn off Microsofts "security" warnings. 
+ add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4005 /wd4996 /nologo" ) + + if (JANSSON_STATIC_CRT) + set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT") + set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MTd") + endif() +endif() + +message("C compiler: ${CMAKE_C_COMPILER_ID}") + +if (JANSSON_COVERAGE) + include(CodeCoverage) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage") +endif() + +check_include_files (endian.h HAVE_ENDIAN_H) +check_include_files (fcntl.h HAVE_FCNTL_H) +check_include_files (sched.h HAVE_SCHED_H) +check_include_files (unistd.h HAVE_UNISTD_H) +check_include_files (sys/param.h HAVE_SYS_PARAM_H) +check_include_files (sys/stat.h HAVE_SYS_STAT_H) +check_include_files (sys/time.h HAVE_SYS_TIME_H) +check_include_files (sys/types.h HAVE_SYS_TYPES_H) + +check_function_exists (close HAVE_CLOSE) +check_function_exists (getpid HAVE_GETPID) +check_function_exists (gettimeofday HAVE_GETTIMEOFDAY) +check_function_exists (open HAVE_OPEN) +check_function_exists (read HAVE_READ) +check_function_exists (sched_yield HAVE_SCHED_YIELD) + +# Check for the int-type includes +check_include_files (stdint.h HAVE_STDINT_H) + +# Check our 64 bit integer sizes +check_type_size (__int64 __INT64) +check_type_size (int64_t INT64_T) +check_type_size ("long long" LONG_LONG_INT) + +# Check our 32 bit integer sizes +check_type_size (int32_t INT32_T) +check_type_size (__int32 __INT32) +check_type_size ("long" LONG_INT) +check_type_size ("int" INT) +if (HAVE_INT32_T) + set (JSON_INT32 int32_t) +elseif (HAVE___INT32) + set (JSON_INT32 __int32) +elseif (HAVE_LONG_INT AND (LONG_INT EQUAL 4)) + set (JSON_INT32 long) +elseif (HAVE_INT AND (INT EQUAL 4)) + set (JSON_INT32 int) +else () + message (FATAL_ERROR "Could not detect a valid 32-bit integer type") +endif () + +check_type_size ("unsigned long" UNSIGNED_LONG_INT) +check_type_size ("unsigned int" UNSIGNED_INT) +check_type_size ("unsigned short" UNSIGNED_SHORT) + +check_type_size (uint32_t 
UINT32_T) +check_type_size (__uint32 __UINT32) +if (HAVE_UINT32_T) + set (JSON_UINT32 uint32_t) +elseif (HAVE___UINT32) + set (JSON_UINT32 __uint32) +elseif (HAVE_UNSIGNED_LONG_INT AND (UNSIGNED_LONG_INT EQUAL 4)) + set (JSON_UINT32 "unsigned long") +elseif (HAVE_UNSIGNED_INT AND (UNSIGNED_INT EQUAL 4)) + set (JSON_UINT32 "unsigned int") +else () + message (FATAL_ERROR "Could not detect a valid unsigned 32-bit integer type") +endif () + +check_type_size (uint16_t UINT16_T) +check_type_size (__uint16 __UINT16) +if (HAVE_UINT16_T) + set (JSON_UINT16 uint16_t) +elseif (HAVE___UINT16) + set (JSON_UINT16 __uint16) +elseif (HAVE_UNSIGNED_INT AND (UNSIGNED_INT EQUAL 2)) + set (JSON_UINT16 "unsigned int") +elseif (HAVE_UNSIGNED_SHORT AND (UNSIGNED_SHORT EQUAL 2)) + set (JSON_UINT16 "unsigned short") +else () + message (FATAL_ERROR "Could not detect a valid unsigned 16-bit integer type") +endif () + +check_type_size (uint8_t UINT8_T) +check_type_size (__uint8 __UINT8) +if (HAVE_UINT8_T) + set (JSON_UINT8 uint8_t) +elseif (HAVE___UINT8) + set (JSON_UINT8 __uint8) +else () + set (JSON_UINT8 "unsigned char") +endif () + +# Check for ssize_t and SSIZE_T existence. 
+check_type_size(ssize_t SSIZE_T) +check_type_size(SSIZE_T UPPERCASE_SSIZE_T) +if(NOT HAVE_SSIZE_T) + if(HAVE_UPPERCASE_SSIZE_T) + set(JSON_SSIZE SSIZE_T) + else() + set(JSON_SSIZE int) + endif() +endif() +set(CMAKE_EXTRA_INCLUDE_FILES "") + +# Check for all the variants of strtoll +check_function_exists (strtoll HAVE_STRTOLL) +check_function_exists (strtoq HAVE_STRTOQ) +check_function_exists (_strtoi64 HAVE__STRTOI64) + +# Figure out what variant we should use +if (HAVE_STRTOLL) + set (JSON_STRTOINT strtoll) +elseif (HAVE_STRTOQ) + set (JSON_STRTOINT strtoq) +elseif (HAVE__STRTOI64) + set (JSON_STRTOINT _strtoi64) +else () + # fallback to strtol (32 bit) + # this will set all the required variables + set (JSON_STRTOINT strtol) + set (JSON_INT_T long) + set (JSON_INTEGER_FORMAT "\"ld\"") +endif () + +# if we haven't defined JSON_INT_T, then we have a 64 bit conversion function. +# detect what to use for the 64 bit type. +# Note: I will prefer long long if I can get it, as that is what the automake system aimed for. +if (NOT DEFINED JSON_INT_T) + if (HAVE_LONG_LONG_INT AND (LONG_LONG_INT EQUAL 8)) + set (JSON_INT_T "long long") + elseif (HAVE_INT64_T) + set (JSON_INT_T int64_t) + elseif (HAVE___INT64) + set (JSON_INT_T __int64) + else () + message (FATAL_ERROR "Could not detect 64 bit type, although I detected the strtoll equivalent") + endif () + + # Apparently, Borland BCC and MSVC wants I64d, + # Borland BCC could also accept LD + # and gcc wants ldd, + # I am not sure what cygwin will want, so I will assume I64d + + if (WIN32) # matches both msvc and cygwin + set (JSON_INTEGER_FORMAT "\"I64d\"") + else () + set (JSON_INTEGER_FORMAT "\"lld\"") + endif () +endif () + + +# If locale.h and localeconv() are available, define to 1, otherwise to 0. 
+check_include_files (locale.h HAVE_LOCALE_H) +check_function_exists (localeconv HAVE_LOCALECONV) + +if (HAVE_LOCALECONV AND HAVE_LOCALE_H) + set (JSON_HAVE_LOCALECONV 1) +else () + set (JSON_HAVE_LOCALECONV 0) +endif() + +# check if we have setlocale +check_function_exists(setlocale HAVE_SETLOCALE) + +# Check what the inline keyword is. +# Note that the original JSON_INLINE was always set to just 'inline', so this goes further. +check_function_keywords("inline") +check_function_keywords("__inline") +check_function_keywords("__inline__") + +if (HAVE_INLINE) + set(JSON_INLINE inline) +elseif (HAVE___INLINE) + set(JSON_INLINE __inline) +elseif (HAVE___INLINE__) + set(JSON_INLINE __inline__) +else() + # no inline on this platform + set (JSON_INLINE) +endif() + +check_c_source_compiles ("int main() { unsigned long val; __sync_bool_compare_and_swap(&val, 0, 1); __sync_add_and_fetch(&val, 1); __sync_sub_and_fetch(&val, 1); return 0; } " HAVE_SYNC_BUILTINS) +check_c_source_compiles ("int main() { char l; unsigned long v; __atomic_test_and_set(&l, __ATOMIC_RELAXED); __atomic_store_n(&v, 1, __ATOMIC_RELEASE); __atomic_load_n(&v, __ATOMIC_ACQUIRE); __atomic_add_fetch(&v, 1, __ATOMIC_ACQUIRE); __atomic_sub_fetch(&v, 1, __ATOMIC_RELEASE); return 0; }" HAVE_ATOMIC_BUILTINS) + +if (HAVE_SYNC_BUILTINS) + set(JSON_HAVE_SYNC_BUILTINS 1) +else() + set(JSON_HAVE_SYNC_BUILTINS 0) +endif() + +if (HAVE_ATOMIC_BUILTINS) + set(JSON_HAVE_ATOMIC_BUILTINS 1) +else() + set(JSON_HAVE_ATOMIC_BUILTINS 0) +endif() + +set (JANSSON_INITIAL_HASHTABLE_ORDER 3 CACHE STRING "Number of buckets new object hashtables contain is 2 raised to this power. 
The default is 3, so empty hashtables contain 2^3 = 8 buckets.") + +# configure the public config file +configure_file (${CMAKE_CURRENT_SOURCE_DIR}/cmake/jansson_config.h.cmake + ${CMAKE_CURRENT_BINARY_DIR}/include/jansson_config.h) + +# Copy the jansson.h file to the public include folder +file (COPY ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson.h + DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/) + +add_definitions(-DJANSSON_USING_CMAKE) + +# configure the private config file +configure_file (${CMAKE_CURRENT_SOURCE_DIR}/cmake/jansson_private_config.h.cmake + ${CMAKE_CURRENT_BINARY_DIR}/private_include/jansson_private_config.h) + +# and tell the source code to include it +add_definitions(-DHAVE_CONFIG_H) + +include_directories (${CMAKE_CURRENT_BINARY_DIR}/include) +include_directories (${CMAKE_CURRENT_BINARY_DIR}/private_include) + +# Add the lib sources. +file(GLOB JANSSON_SRC src/*.c) + +set(JANSSON_HDR_PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR}/src/hashtable.h + ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson_private.h + ${CMAKE_CURRENT_SOURCE_DIR}/src/strbuffer.h + ${CMAKE_CURRENT_SOURCE_DIR}/src/utf.h + ${CMAKE_CURRENT_BINARY_DIR}/private_include/jansson_private_config.h) + +set(JANSSON_HDR_PUBLIC + ${CMAKE_CURRENT_BINARY_DIR}/include/jansson_config.h + ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson.h) + +source_group("Library Sources" FILES ${JANSSON_SRC}) +source_group("Library Private Headers" FILES ${JANSSON_HDR_PRIVATE}) +source_group("Library Public Headers" FILES ${JANSSON_HDR_PUBLIC}) + +if(JANSSON_BUILD_SHARED_LIBS) + add_library(jansson SHARED + ${JANSSON_SRC} + ${JANSSON_HDR_PRIVATE} + ${JANSSON_HDR_PUBLIC} + src/jansson.def) + +# check if linker support --default-symver + list(APPEND CMAKE_REQUIRED_LIBRARIES "-Wl,--default-symver") + check_c_source_compiles( + " + int main (void) + { + return 0; + } + " + DSYMVER_WORKS + ) + list(REMOVE_ITEM CMAKE_REQUIRED_LIBRARIES "-Wl,--default-symver") + + if (DSYMVER_WORKS) + set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} 
-Wl,--default-symver") + else() +# some linkers may only support --version-script + file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/jansson.sym" "JANSSON_${JANSSON_SOVERSION} { + global: + *; +}; +") + list(APPEND CMAKE_REQUIRED_LIBRARIES "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") + check_c_source_compiles( + " + int main (void) + { + return 0; + } + " + VSCRIPT_WORKS + ) + list(REMOVE_ITEM CMAKE_REQUIRED_LIBRARIES "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") + if (VSCRIPT_WORKS) + set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") + endif() + endif() + + set_target_properties(jansson PROPERTIES + VERSION ${JANSSON_VERSION} + SOVERSION ${JANSSON_SOVERSION}) +else() + add_library(jansson STATIC + ${JANSSON_SRC} + ${JANSSON_HDR_PRIVATE} + ${JANSSON_HDR_PUBLIC}) + set_target_properties(jansson PROPERTIES + POSITION_INDEPENDENT_CODE true) +endif() + +if (JANSSON_EXAMPLES) + add_executable(simple_parse "${CMAKE_CURRENT_SOURCE_DIR}/examples/simple_parse.c") + target_link_libraries(simple_parse jansson) +endif() + +# For building Documentation (uses Sphinx) +option(JANSSON_BUILD_DOCS "Build documentation (uses python-sphinx)." ON) +if (JANSSON_BUILD_DOCS) + find_package(Sphinx) + + if (NOT SPHINX_FOUND) + message(WARNING "Sphinx not found. Cannot generate documentation! + Set -DJANSSON_BUILD_DOCS=OFF to get rid of this message.") + else() + if (Sphinx_VERSION_STRING VERSION_LESS 1.0) + message(WARNING "Your Sphinx version is too old! + This project requires Sphinx v1.0 or above to produce + proper documentation (you have v${Sphinx_VERSION_STRING}). 
+ You will get output but it will have errors.") + endif() + + # configured documentation tools and intermediate build results + set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") + + # Sphinx cache with pickled ReST documents + set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") + + # CMake could be used to build the conf.py file too, + # eg it could automatically write the version of the program or change the theme. + # if(NOT DEFINED SPHINX_THEME) + # set(SPHINX_THEME default) + # endif() + # + # if(NOT DEFINED SPHINX_THEME_DIR) + # set(SPHINX_THEME_DIR) + # endif() + # + # configure_file( + # "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in" + # "${BINARY_BUILD_DIR}/conf.py" + # @ONLY) + + # TODO: Add support for all sphinx builders: http://sphinx-doc.org/builders.html + + # Add documentation targets. + set(DOC_TARGETS html) + + option(JANSSON_BUILD_MAN "Create a target for building man pages." ON) + + if (JANSSON_BUILD_MAN) + if (Sphinx_VERSION_STRING VERSION_LESS 1.0) + message(WARNING "Sphinx version 1.0 > is required to build man pages. You have v${Sphinx_VERSION_STRING}.") + else() + list(APPEND DOC_TARGETS man) + endif() + endif() + + option(JANSSON_BUILD_LATEX "Create a target for building latex docs (to create PDF)." OFF) + + if (JANSSON_BUILD_LATEX) + find_package(LATEX) + + if (NOT LATEX_COMPILER) + message("Couldn't find Latex, can't build latex docs using Sphinx") + else() + message("Latex found! If you have problems building, see Sphinx documentation for required Latex packages.") + list(APPEND DOC_TARGETS latex) + endif() + endif() + + # The doc target will build all documentation targets. 
+ add_custom_target(doc) + + foreach (DOC_TARGET ${DOC_TARGETS}) + add_custom_target(${DOC_TARGET} + ${SPHINX_EXECUTABLE} + # -q # Enable for quiet mode + -b ${DOC_TARGET} + -d "${SPHINX_CACHE_DIR}" + # -c "${BINARY_BUILD_DIR}" # enable if using cmake-generated conf.py + "${CMAKE_CURRENT_SOURCE_DIR}/doc" + "${CMAKE_CURRENT_BINARY_DIR}/doc/${DOC_TARGET}" + COMMENT "Building ${DOC_TARGET} documentation with Sphinx") + + add_dependencies(doc ${DOC_TARGET}) + endforeach() + + message("Building documentation enabled for: ${DOC_TARGETS}") + endif() +endif () + + +option(JANSSON_WITHOUT_TESTS "Don't build tests ('make test' to execute tests)" OFF) + +if (NOT JANSSON_WITHOUT_TESTS) + option(JANSSON_TEST_WITH_VALGRIND "Enable valgrind tests." OFF) + + ENABLE_TESTING() + + if (JANSSON_TEST_WITH_VALGRIND) + # TODO: Add FindValgrind.cmake instead of having a hardcoded path. + + add_definitions(-DVALGRIND) + + # enable valgrind + set(CMAKE_MEMORYCHECK_COMMAND valgrind) + set(CMAKE_MEMORYCHECK_COMMAND_OPTIONS + "--error-exitcode=1 --leak-check=full --show-reachable=yes --track-origins=yes -q") + + set(MEMCHECK_COMMAND + "${CMAKE_MEMORYCHECK_COMMAND} ${CMAKE_MEMORYCHECK_COMMAND_OPTIONS}") + separate_arguments(MEMCHECK_COMMAND) + endif () + + # + # Test suites. + # + if (CMAKE_COMPILER_IS_GNUCC) + add_definitions(-Wall -Wextra -Wdeclaration-after-statement) + endif () + + set(api_tests + test_array + test_chaos + test_copy + test_dump + test_dump_callback + test_equal + test_fixed_size + test_load + test_load_callback + test_loadb + test_number + test_object + test_pack + test_simple + test_sprintf + test_unpack) + + # Doing arithmetic on void pointers is not allowed by Microsofts compiler + # such as secure_malloc and secure_free is doing, so exclude it for now. + if (NOT MSVC) + list(APPEND api_tests test_memory_funcs) + endif() + + # Helper macro for building and linking a test program. 
+ macro(build_testprog name dir) + add_executable(${name} ${dir}/${name}.c) + add_dependencies(${name} jansson) + target_link_libraries(${name} jansson) + endmacro(build_testprog) + + # Create executables and tests/valgrind tests for API tests. + foreach (test ${api_tests}) + build_testprog(${test} ${CMAKE_CURRENT_SOURCE_DIR}/test/suites/api) + + if (JANSSON_TEST_WITH_VALGRIND) + add_test(memcheck__${test} + ${MEMCHECK_COMMAND} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${test} + WORKING_DIRECTORY ${JANSSON_TEMP_DIR}) + else() + add_test(${test} + ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${test} + WORKING_DIRECTORY ${JANSSON_TEMP_DIR}) + endif () + endforeach () + + # Test harness for the suites tests. + build_testprog(json_process ${CMAKE_CURRENT_SOURCE_DIR}/test/bin) + + set(SUITE_TEST_CMD ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/json_process) + set(SUITES encoding-flags valid invalid invalid-unicode) + foreach (SUITE ${SUITES}) + file(GLOB TESTDIRS test/suites/${SUITE}/*) + + foreach (TESTDIR ${TESTDIRS}) + if (IS_DIRECTORY ${TESTDIR}) + get_filename_component(TNAME ${TESTDIR} NAME) + + if (JANSSON_TEST_WITH_VALGRIND) + add_test(memcheck__${SUITE}__${TNAME} + ${MEMCHECK_COMMAND} ${SUITE_TEST_CMD} ${TESTDIR}) + else() + add_test(${SUITE}__${TNAME} + ${SUITE_TEST_CMD} ${TESTDIR}) + endif() + + if ((${SUITE} STREQUAL "valid" OR ${SUITE} STREQUAL "invalid") AND NOT EXISTS ${TESTDIR}/nostrip) + if (JANSSON_TEST_WITH_VALGRIND) + add_test(memcheck__${SUITE}__${TNAME}__strip + ${MEMCHECK_COMMAND} ${SUITE_TEST_CMD} --strip ${TESTDIR}) + else() + add_test(${SUITE}__${TNAME}__strip + ${SUITE_TEST_CMD} --strip ${TESTDIR}) + endif() + endif () + endif () + endforeach () + endforeach () + + if (JANSSON_COVERAGE) + SETUP_TARGET_FOR_COVERAGE(coverage coverage ctest) + endif () + + # Enable using "make check" just like the autotools project. 
+ # By default cmake creates a target "make test" + add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} + DEPENDS json_process ${api_tests}) +endif () + +# +# Installation preparation. +# + +# Allow the user to override installation directories. +set(JANSSON_INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries") +set(JANSSON_INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables") +set(JANSSON_INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files") + +if(WIN32 AND NOT CYGWIN) + set(DEF_INSTALL_CMAKE_DIR cmake) +else() + set(DEF_INSTALL_CMAKE_DIR lib/cmake/jansson) +endif() + +set(JANSSON_INSTALL_CMAKE_DIR ${DEF_INSTALL_CMAKE_DIR} CACHE PATH "Installation directory for CMake files") + +# Create pkg-conf file. +# (We use the same files as ./configure does, so we +# have to defined the same variables used there). +set(prefix ${CMAKE_INSTALL_PREFIX}) +set(exec_prefix "\${prefix}") +set(libdir "\${exec_prefix}/${JANSSON_INSTALL_LIB_DIR}") +set(includedir "\${prefix}/${JANSSON_INSTALL_INCLUDE_DIR}") +set(VERSION ${JANSSON_DISPLAY_VERSION}) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/jansson.pc.in + ${CMAKE_CURRENT_BINARY_DIR}/jansson.pc @ONLY) + +# Make sure the paths are relative. +foreach(p LIB BIN INCLUDE CMAKE) + set(var JANSSON_INSTALL_${p}_DIR) +endforeach() + +# Generate the config file for the build-tree. +set(JANSSON__INCLUDE_DIRS + "${CMAKE_CURRENT_SOURCE_DIR}/include" + "${CMAKE_CURRENT_BINARY_DIR}/include") +set(JANSSON_INCLUDE_DIRS ${JANSSON__INCLUDE_DIRS} CACHE PATH "Jansson include directories") +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/janssonConfig.cmake.in + ${CMAKE_CURRENT_BINARY_DIR}/janssonConfig.cmake + @ONLY) + + +# Generate the config file for the installation tree. 
+include(CMakePackageConfigHelpers) + +write_basic_package_version_file( + "${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfigVersion.cmake" + VERSION ${JANSSON_VERSION} + COMPATIBILITY ExactVersion +) + +configure_package_config_file( + "cmake/janssonConfig.cmake.in" + "${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfig.cmake" + INSTALL_DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}" +) + +# +# Install targets. +# +option(JANSSON_INSTALL "Generate installation target" ON) +if (JANSSON_INSTALL) + install(TARGETS jansson + EXPORT janssonTargets + LIBRARY DESTINATION "lib" + ARCHIVE DESTINATION "lib" + RUNTIME DESTINATION "bin" + INCLUDES DESTINATION "include") + + install(FILES ${JANSSON_HDR_PUBLIC} + DESTINATION "include") + + # Install the pkg-config. + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/jansson.pc + DESTINATION lib/pkgconfig) + + # Install the configs. + install(FILES + ${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfigVersion.cmake + DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}") + + # Install exports for the install-tree. + install(EXPORT janssonTargets + NAMESPACE jansson:: + DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}") +endif() + +# For use when simply using add_library from a parent project to build jansson. 
+set(JANSSON_LIBRARIES jansson CACHE STRING "jansson libraries") diff --git a/solo-ckpool-source/src/jansson-2.14/LICENSE b/solo-ckpool-source/src/jansson-2.14/LICENSE new file mode 100644 index 0000000..483459c --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2009-2020 Petri Lehtinen + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/solo-ckpool-source/src/jansson-2.14/Makefile.am b/solo-ckpool-source/src/jansson-2.14/Makefile.am new file mode 100644 index 0000000..bbeefbd --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/Makefile.am @@ -0,0 +1,13 @@ +ACLOCAL_AMFLAGS = -I m4 +EXTRA_DIST = CHANGES LICENSE README.rst CMakeLists.txt cmake android examples scripts +SUBDIRS = doc src + +# "make distcheck" builds the dvi target, so use it to check that the +# documentation is built correctly. 
+dvi: + $(MAKE) SPHINXOPTS_EXTRA=-W html + +pkgconfigdir = $(libdir)/pkgconfig +pkgconfig_DATA = jansson.pc + +TESTS = scripts/clang-format-check diff --git a/solo-ckpool-source/src/jansson-2.14/README.rst b/solo-ckpool-source/src/jansson-2.14/README.rst new file mode 100644 index 0000000..83fc89a --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/README.rst @@ -0,0 +1,81 @@ +Jansson README +============== + +.. image:: https://github.com/akheron/jansson/workflows/tests/badge.svg + :target: https://github.com/akheron/jansson/actions + +.. image:: https://ci.appveyor.com/api/projects/status/lmhkkc4q8cwc65ko + :target: https://ci.appveyor.com/project/akheron/jansson + +.. image:: https://coveralls.io/repos/akheron/jansson/badge.png?branch=master + :target: https://coveralls.io/r/akheron/jansson?branch=master + +Jansson_ is a C library for encoding, decoding and manipulating JSON +data. Its main features and design principles are: + +- Simple and intuitive API and data model + +- `Comprehensive documentation`_ + +- No dependencies on other libraries + +- Full Unicode support (UTF-8) + +- Extensive test suite + +Jansson is licensed under the `MIT license`_; see LICENSE in the +source distribution for details. + + +Compilation and Installation +---------------------------- + +You can download and install Jansson using the `vcpkg `_ dependency manager: + +.. code-block:: bash + + git clone https://github.com/Microsoft/vcpkg.git + cd vcpkg + ./bootstrap-vcpkg.sh + ./vcpkg integrate install + vcpkg install jansson + +The Jansson port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please `create an issue or pull request `_ on the vcpkg repository. 
+ +If you obtained a `source tarball`_ from the "Releases" section of the main +site just use the standard autotools commands:: + + $ ./configure + $ make + $ make install + +To run the test suite, invoke:: + + $ make check + +If the source has been checked out from a Git repository, the +./configure script has to be generated first. The easiest way is to +use autoreconf:: + + $ autoreconf -i + + +Documentation +------------- + +Documentation is available at http://jansson.readthedocs.io/en/latest/. + +The documentation source is in the ``doc/`` subdirectory. To generate +HTML documentation, invoke:: + + $ make html + +Then, point your browser to ``doc/_build/html/index.html``. Sphinx_ +1.0 or newer is required to generate the documentation. + + +.. _Jansson: http://www.digip.org/jansson/ +.. _`Comprehensive documentation`: http://jansson.readthedocs.io/en/latest/ +.. _`MIT license`: http://www.opensource.org/licenses/mit-license.php +.. _`source tarball`: http://www.digip.org/jansson#releases +.. _Sphinx: http://sphinx.pocoo.org/ diff --git a/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h b/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h new file mode 100644 index 0000000..618a0da --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2010-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + * + * + * This file specifies a part of the site-specific configuration for + * Jansson, namely those things that affect the public API in + * jansson.h. + * + * The configure script copies this file to jansson_config.h and + * replaces @var@ substitutions by values that fit your system. If you + * cannot run the configure script, you can do the value substitution + * by hand. 
+ */ + +#ifndef JANSSON_CONFIG_H +#define JANSSON_CONFIG_H + +/* If your compiler supports the inline keyword in C, JSON_INLINE is + defined to `inline', otherwise empty. In C++, the inline is always + supported. */ +#ifdef __cplusplus +#define JSON_INLINE inline +#else +#define JSON_INLINE inline +#endif + +/* If your compiler supports the `long long` type and the strtoll() + library function, JSON_INTEGER_IS_LONG_LONG is defined to 1, + otherwise to 0. */ +#define JSON_INTEGER_IS_LONG_LONG 1 + +/* If locale.h and localeconv() are available, define to 1, + otherwise to 0. */ +#define JSON_HAVE_LOCALECONV 0 + +/* Maximum recursion depth for parsing JSON input. + This limits the depth of e.g. array-within-array constructions. */ +#define JSON_PARSER_MAX_DEPTH 2048 + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake new file mode 100644 index 0000000..44601fd --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake @@ -0,0 +1,15 @@ +include(CheckCSourceCompiles) + +macro(check_function_keywords _wordlist) + set(${_result} "") + foreach(flag ${_wordlist}) + string(REGEX REPLACE "[-+/ ()]" "_" flagname "${flag}") + string(TOUPPER "${flagname}" flagname) + set(have_flag "HAVE_${flagname}") + check_c_source_compiles("${flag} void func(); void func() { } int main() { func(); return 0; }" ${have_flag}) + if(${have_flag} AND NOT ${_result}) + set(${_result} "${flag}") +# break() + endif(${have_flag} AND NOT ${_result}) + endforeach(flag) +endmacro(check_function_keywords) diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake new file mode 100644 index 0000000..3a21d3d --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake @@ -0,0 +1,163 @@ +# +# Boost Software License - Version 1.0 - August 17th, 2003 +# +# Permission is 
hereby granted, free of charge, to any person or organization +# obtaining a copy of the software and accompanying documentation covered by +# this license (the "Software") to use, reproduce, display, distribute, +# execute, and transmit the Software, and to prepare derivative works of the +# Software, and to permit third-parties to whom the Software is furnished to +# do so, all subject to the following: +# +# The copyright notices in the Software and this entire statement, including +# the above license grant, this restriction and the following disclaimer, +# must be included in all copies of the Software, in whole or in part, and +# all derivative works of the Software, unless such copies or derivative +# works are solely in the form of machine-executable object code generated by +# a source language processor. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT +# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE +# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. +# +# 2012-01-31, Lars Bilke +# - Enable Code Coverage +# +# 2013-09-17, Joakim Söderberg +# - Added support for Clang. +# - Some additional usage instructions. +# +# USAGE: +# 1. Copy this file into your cmake modules path. +# +# 2. Add the following line to your CMakeLists.txt: +# INCLUDE(CodeCoverage) +# +# 3. Set compiler flags to turn off optimization and enable coverage: +# SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") +# SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") +# +# 3. 
Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target +# which runs your test executable and produces a lcov code coverage report: +# Example: +# SETUP_TARGET_FOR_COVERAGE( +# my_coverage_target # Name for custom target. +# test_driver # Name of the test driver executable that runs the tests. +# # NOTE! This should always have a ZERO as exit code +# # otherwise the coverage generation will not complete. +# coverage # Name of output directory. +# ) +# +# 4. Build a Debug build: +# cmake -DCMAKE_BUILD_TYPE=Debug .. +# make +# make my_coverage_target +# +# + +# Check prereqs +FIND_PROGRAM( GCOV_PATH gcov ) +FIND_PROGRAM( LCOV_PATH lcov ) +FIND_PROGRAM( GENHTML_PATH genhtml ) +FIND_PROGRAM( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/tests) + +IF(NOT GCOV_PATH) + MESSAGE(FATAL_ERROR "gcov not found! Aborting...") +ENDIF() # NOT GCOV_PATH + +IF(NOT (CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_GNUCC)) + # Clang version 3.0.0 and greater now supports gcov as well. + MESSAGE(WARNING "Compiler is not GNU gcc! Clang Version 3.0.0 and greater supports gcov as well, but older versions don't.") + + IF(NOT ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")) + MESSAGE(FATAL_ERROR "Compiler is not GNU gcc or Clang! Aborting...") + ENDIF() +ENDIF() # NOT CMAKE_COMPILER_IS_GNUCXX + +IF ( NOT CMAKE_BUILD_TYPE STREQUAL "Debug" ) + MESSAGE( WARNING "Code coverage results with an optimized (non-Debug) build may be misleading" ) +ENDIF() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" + + +# Param _targetname The name of new the custom make target +# Param _outputname lcov output is generated as _outputname.info +# HTML report is generated in _outputname/index.html +# Param _testrunner The name of the target which runs the tests. +# MUST return ZERO always, even on errors. +# If not, no coverage report will be created! 
+# Optional fourth parameter is passed as arguments to _testrunner +# Pass them in list form, e.g.: "-j;2" for -j 2 +FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _outputname _testrunner) + + IF(NOT LCOV_PATH) + MESSAGE(FATAL_ERROR "lcov not found! Aborting...") + ENDIF() # NOT LCOV_PATH + + IF(NOT GENHTML_PATH) + MESSAGE(FATAL_ERROR "genhtml not found! Aborting...") + ENDIF() # NOT GENHTML_PATH + + # Setup target + ADD_CUSTOM_TARGET(${_targetname} + + # Cleanup lcov + ${LCOV_PATH} --directory . --zerocounters + + # Run tests + COMMAND ${_testrunner} ${ARGV3} + + # Capturing lcov counters and generating report + COMMAND ${LCOV_PATH} --directory . --capture --output-file ${_outputname}.info --rc lcov_branch_coverage=1 + COMMAND ${LCOV_PATH} --remove ${_outputname}.info '*/build/include/*' '*/test/*' '/usr/include/*' --output-file ${_outputname}.info --rc lcov_branch_coverage=1 + # COMMAND ${GENHTML_PATH} --branch-coverage -o ${_outputname} ${_outputname}.info.cleaned + # COMMAND ${CMAKE_COMMAND} -E remove ${_outputname}.info ${_outputname}.info.cleaned + + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." + ) + + # Show info where to find the report + ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD + COMMAND ; + COMMENT "Open ./${_outputname}/index.html in your browser to view the coverage report." + ) + +ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE + +# Param _targetname The name of new the custom make target +# Param _testrunner The name of the target which runs the tests +# Param _outputname cobertura output is generated as _outputname.xml +# Optional fourth parameter is passed as arguments to _testrunner +# Pass them in list form, e.g.: "-j;2" for -j 2 +FUNCTION(SETUP_TARGET_FOR_COVERAGE_COBERTURA _targetname _testrunner _outputname) + + IF(NOT PYTHON_EXECUTABLE) + MESSAGE(FATAL_ERROR "Python not found! 
Aborting...") + ENDIF() # NOT PYTHON_EXECUTABLE + + IF(NOT GCOVR_PATH) + MESSAGE(FATAL_ERROR "gcovr not found! Aborting...") + ENDIF() # NOT GCOVR_PATH + + ADD_CUSTOM_TARGET(${_targetname} + + # Run tests + ${_testrunner} ${ARGV3} + + # Running gcovr + COMMAND ${GCOVR_PATH} -x -r ${CMAKE_SOURCE_DIR} -e '${CMAKE_SOURCE_DIR}/tests/' -o ${_outputname}.xml + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + COMMENT "Running gcovr to produce Cobertura code coverage report." + ) + + # Show info where to find the report + ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD + COMMAND ; + COMMENT "Cobertura code coverage report saved in ${_outputname}.xml." + ) + +ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE_COBERTURA + diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake new file mode 100644 index 0000000..3bf0a5d --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake @@ -0,0 +1,315 @@ +# +# PART B. DOWNLOADING AGREEMENT - LICENSE FROM SBIA WITH RIGHT TO SUBLICENSE ("SOFTWARE LICENSE"). +# ------------------------------------------------------------------------------------------------ +# +# 1. As used in this Software License, "you" means the individual downloading and/or +# using, reproducing, modifying, displaying and/or distributing the Software and +# the institution or entity which employs or is otherwise affiliated with such +# individual in connection therewith. 
The Section of Biomedical Image Analysis, +# Department of Radiology at the Universiy of Pennsylvania ("SBIA") hereby grants +# you, with right to sublicense, with respect to SBIA's rights in the software, +# and data, if any, which is the subject of this Software License (collectively, +# the "Software"), a royalty-free, non-exclusive license to use, reproduce, make +# derivative works of, display and distribute the Software, provided that: +# (a) you accept and adhere to all of the terms and conditions of this Software +# License; (b) in connection with any copy of or sublicense of all or any portion +# of the Software, all of the terms and conditions in this Software License shall +# appear in and shall apply to such copy and such sublicense, including without +# limitation all source and executable forms and on any user documentation, +# prefaced with the following words: "All or portions of this licensed product +# (such portions are the "Software") have been obtained under license from the +# Section of Biomedical Image Analysis, Department of Radiology at the University +# of Pennsylvania and are subject to the following terms and conditions:" +# (c) you preserve and maintain all applicable attributions, copyright notices +# and licenses included in or applicable to the Software; (d) modified versions +# of the Software must be clearly identified and marked as such, and must not +# be misrepresented as being the original Software; and (e) you consider making, +# but are under no obligation to make, the source code of any of your modifications +# to the Software freely available to others on an open source basis. +# +# 2. 
The license granted in this Software License includes without limitation the +# right to (i) incorporate the Software into proprietary programs (subject to +# any restrictions applicable to such programs), (ii) add your own copyright +# statement to your modifications of the Software, and (iii) provide additional +# or different license terms and conditions in your sublicenses of modifications +# of the Software; provided that in each case your use, reproduction or +# distribution of such modifications otherwise complies with the conditions +# stated in this Software License. +# +# 3. This Software License does not grant any rights with respect to third party +# software, except those rights that SBIA has been authorized by a third +# party to grant to you, and accordingly you are solely responsible for +# (i) obtaining any permissions from third parties that you need to use, +# reproduce, make derivative works of, display and distribute the Software, +# and (ii) informing your sublicensees, including without limitation your +# end-users, of their obligations to secure any such required permissions. +# +# 4. The Software has been designed for research purposes only and has not been +# reviewed or approved by the Food and Drug Administration or by any other +# agency. YOU ACKNOWLEDGE AND AGREE THAT CLINICAL APPLICATIONS ARE NEITHER +# RECOMMENDED NOR ADVISED. Any commercialization of the Software is at the +# sole risk of the party or parties engaged in such commercialization. +# You further agree to use, reproduce, make derivative works of, display +# and distribute the Software in compliance with all applicable governmental +# laws, regulations and orders, including without limitation those relating +# to export and import control. +# +# 5. The Software is provided "AS IS" and neither SBIA nor any contributor to +# the software (each a "Contributor") shall have any obligation to provide +# maintenance, support, updates, enhancements or modifications thereto. 
+# SBIA AND ALL CONTRIBUTORS SPECIFICALLY DISCLAIM ALL EXPRESS AND IMPLIED +# WARRANTIES OF ANY KIND INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. +# IN NO EVENT SHALL SBIA OR ANY CONTRIBUTOR BE LIABLE TO ANY PARTY FOR +# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY ARISING IN ANY WAY RELATED +# TO THE SOFTWARE, EVEN IF SBIA OR ANY CONTRIBUTOR HAS BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGES. TO THE MAXIMUM EXTENT NOT PROHIBITED BY LAW OR +# REGULATION, YOU FURTHER ASSUME ALL LIABILITY FOR YOUR USE, REPRODUCTION, +# MAKING OF DERIVATIVE WORKS, DISPLAY, LICENSE OR DISTRIBUTION OF THE SOFTWARE +# AND AGREE TO INDEMNIFY AND HOLD HARMLESS SBIA AND ALL CONTRIBUTORS FROM +# AND AGAINST ANY AND ALL CLAIMS, SUITS, ACTIONS, DEMANDS AND JUDGMENTS ARISING +# THEREFROM. +# +# 6. None of the names, logos or trademarks of SBIA or any of SBIA's affiliates +# or any of the Contributors, or any funding agency, may be used to endorse +# or promote products produced in whole or in part by operation of the Software +# or derived from or based on the Software without specific prior written +# permission from the applicable party. +# +# 7. Any use, reproduction or distribution of the Software which is not in accordance +# with this Software License shall automatically revoke all rights granted to you +# under this Software License and render Paragraphs 1 and 2 of this Software +# License null and void. +# +# 8. This Software License does not grant any rights in or to any intellectual +# property owned by SBIA or any Contributor except those rights expressly +# granted hereunder. +# +# +# PART C. MISCELLANEOUS +# --------------------- +# +# This Agreement shall be governed by and construed in accordance with the laws +# of The Commonwealth of Pennsylvania without regard to principles of conflicts +# of law. 
This Agreement shall supercede and replace any license terms that you +# may have agreed to previously with respect to Software from SBIA. +# +############################################################################## +# @file FindSphinx.cmake +# @brief Find Sphinx documentation build tools. +# +# @par Input variables: +# +# +# @tp @b Sphinx_DIR @endtp +# +# +# +# @tp @b SPHINX_DIR @endtp +# +# +# +# @tp @b Sphinx_FIND_COMPONENTS @endtp +# +# +#
Installation directory of Sphinx tools. Can also be set as environment variable.
Alternative environment variable for @c Sphinx_DIR.
Sphinx build tools to look for, i.e., 'apidoc' and/or 'build'.
+# +# @par Output variables: +# +# +# @tp @b Sphinx_FOUND @endtp +# +# +# +# @tp @b SPHINX_FOUND @endtp +# +# +# @tp @b SPHINX_EXECUTABLE @endtp +# +# +# +# @tp @b Sphinx_PYTHON_EXECUTABLE @endtp +# +# +# +# @tp @b Sphinx_PYTHON_OPTIONS @endtp +# +# +# +# @tp @b Sphinx-build_EXECUTABLE @endtp +# +# +# +# @tp @b Sphinx-apidoc_EXECUTABLE @endtp +# +# +# +# @tp @b Sphinx_VERSION_STRING @endtp +# +# +# +# @tp @b Sphinx_VERSION_MAJOR @endtp +# +# +# +# @tp @b Sphinx_VERSION_MINOR @endtp +# +# +# +# @tp @b Sphinx_VERSION_PATCH @endtp +# +# +#
Whether all or only the requested Sphinx build tools were found.
Alias for @c Sphinx_FOUND. +#
Non-cached alias for @c Sphinx-build_EXECUTABLE.
Python executable used to run sphinx-build. This is either the +# by default found Python interpreter or a specific version as +# specified by the shebang (#!) of the sphinx-build script.
A list of Python options extracted from the shebang (#!) of the +# sphinx-build script. The -E option is added by this module +# if the Python executable is not the system default to avoid +# problems with a differing setting of the @c PYTHONHOME.
Absolute path of the found sphinx-build tool.
Absolute path of the found sphinx-apidoc tool.
Sphinx version found e.g. 1.1.2.
Sphinx major version found e.g. 1.
Sphinx minor version found e.g. 1.
Sphinx patch version found e.g. 2.
+# +# @ingroup CMakeFindModules +############################################################################## + +set (_Sphinx_REQUIRED_VARS) + +# ---------------------------------------------------------------------------- +# initialize search +if (NOT Sphinx_DIR) + if (NOT $ENV{Sphinx_DIR} STREQUAL "") + set (Sphinx_DIR "$ENV{Sphinx_DIR}" CACHE PATH "Installation prefix of Sphinx (docutils)." FORCE) + else () + set (Sphinx_DIR "$ENV{SPHINX_DIR}" CACHE PATH "Installation prefix of Sphinx (docutils)." FORCE) + endif () +endif () + +# ---------------------------------------------------------------------------- +# default components to look for +if (NOT Sphinx_FIND_COMPONENTS) + set (Sphinx_FIND_COMPONENTS "build") +elseif (NOT Sphinx_FIND_COMPONENTS MATCHES "^(build|apidoc)$") + message (FATAL_ERROR "Invalid Sphinx component in: ${Sphinx_FIND_COMPONENTS}") +endif () + +# ---------------------------------------------------------------------------- +# find components, i.e., build tools +foreach (_Sphinx_TOOL IN LISTS Sphinx_FIND_COMPONENTS) + if (Sphinx_DIR) + find_program ( + Sphinx-${_Sphinx_TOOL}_EXECUTABLE + NAMES sphinx-${_Sphinx_TOOL} sphinx-${_Sphinx_TOOL}.py + HINTS "${Sphinx_DIR}" + PATH_SUFFIXES bin + DOC "The sphinx-${_Sphinx_TOOL} Python script." + NO_DEFAULT_PATH + ) + else () + find_program ( + Sphinx-${_Sphinx_TOOL}_EXECUTABLE + NAMES sphinx-${_Sphinx_TOOL} sphinx-${_Sphinx_TOOL}.py + DOC "The sphinx-${_Sphinx_TOOL} Python script." 
+ ) + endif () + mark_as_advanced (Sphinx-${_Sphinx_TOOL}_EXECUTABLE) + list (APPEND _Sphinx_REQUIRED_VARS Sphinx-${_Sphinx_TOOL}_EXECUTABLE) +endforeach () + +# ---------------------------------------------------------------------------- +# determine Python executable used by Sphinx +if (Sphinx-build_EXECUTABLE) + # extract python executable from shebang of sphinx-build + find_package (PythonInterp QUIET) + set (Sphinx_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}") + set (Sphinx_PYTHON_OPTIONS) + file (STRINGS "${Sphinx-build_EXECUTABLE}" FIRST_LINE LIMIT_COUNT 1) + if (FIRST_LINE MATCHES "^#!(.*/python.*)") # does not match "#!/usr/bin/env python" ! + string (REGEX REPLACE "^ +| +$" "" Sphinx_PYTHON_EXECUTABLE "${CMAKE_MATCH_1}") + if (Sphinx_PYTHON_EXECUTABLE MATCHES "([^ ]+) (.*)") + set (Sphinx_PYTHON_EXECUTABLE "${CMAKE_MATCH_1}") + string (REGEX REPLACE " +" ";" Sphinx_PYTHON_OPTIONS "${CMAKE_MATCH_2}") + endif () + endif () + # this is done to avoid problems with multiple Python versions being installed + # remember: CMake command if(STR EQUAL STR) is bad and may cause many troubles ! 
+ string (REGEX REPLACE "([.+*?^$])" "\\\\\\1" _Sphinx_PYTHON_EXECUTABLE_RE "${PYTHON_EXECUTABLE}") + list (FIND Sphinx_PYTHON_OPTIONS -E IDX) + if (IDX EQUAL -1 AND NOT Sphinx_PYTHON_EXECUTABLE MATCHES "^${_Sphinx_PYTHON_EXECUTABLE_RE}$") + list (INSERT Sphinx_PYTHON_OPTIONS 0 -E) + endif () + unset (_Sphinx_PYTHON_EXECUTABLE_RE) +endif () + +# ---------------------------------------------------------------------------- +# determine Sphinx version +# some quick experiments by @ploxiln +# - sphinx 1.7 and later have the version output format like "sphinx-build 1.7.2" +# - sphinx 1.2 through 1.6 have the version output format like "Sphinx (sphinx-build) 1.2.2" +# - sphinx 1.1 and before do not have a "--version" flag, but it causes the help output like "-h" does which includes version like "Sphinx v1.0.2" +if (Sphinx-build_EXECUTABLE) + # intentionally use invalid -h option here as the help that is shown then + # will include the Sphinx version information + if (Sphinx_PYTHON_EXECUTABLE) + execute_process ( + COMMAND "${Sphinx_PYTHON_EXECUTABLE}" ${Sphinx_PYTHON_OPTIONS} "${Sphinx-build_EXECUTABLE}" --version + OUTPUT_VARIABLE _Sphinx_VERSION + ERROR_VARIABLE _Sphinx_VERSION + ) + elseif (UNIX) + execute_process ( + COMMAND "${Sphinx-build_EXECUTABLE}" --version + OUTPUT_VARIABLE _Sphinx_VERSION + ERROR_VARIABLE _Sphinx_VERSION + ) + endif () + + # The sphinx version can also contain a "b" instead of the last dot. + # For example "Sphinx v1.2b1" or "Sphinx 1.7.0b2" so we cannot just split on "." 
+ if (_Sphinx_VERSION MATCHES "sphinx-build ([0-9]+\\.[0-9]+(\\.|a?|b?)([0-9]*)(b?)([0-9]*))") + set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") + set (_SPHINX_VERSION_FOUND) + elseif (_Sphinx_VERSION MATCHES "Sphinx v([0-9]+\\.[0-9]+(\\.|b?)([0-9]*)(b?)([0-9]*))") + set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") + set (_SPHINX_VERSION_FOUND) + elseif (_Sphinx_VERSION MATCHES "Sphinx \\(sphinx-build\\) ([0-9]+\\.[0-9]+(\\.|a?|b?)([0-9]*)(b?)([0-9]*))") + set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") + set (_SPHINX_VERSION_FOUND) + endif () +endif () + +if(_SPHINX_VERSION_FOUND) + string(REGEX REPLACE "([0-9]+)\\.[0-9]+(\\.|b)[0-9]+" "\\1" Sphinx_VERSION_MAJOR ${Sphinx_VERSION_STRING}) + string(REGEX REPLACE "[0-9]+\\.([0-9]+)(\\.|b)[0-9]+" "\\1" Sphinx_VERSION_MINOR ${Sphinx_VERSION_STRING}) + string(REGEX REPLACE "[0-9]+\\.[0-9]+(\\.|b)([0-9]+)" "\\1" Sphinx_VERSION_PATCH ${Sphinx_VERSION_STRING}) + + # v1.2.0 -> v1.2 + if (Sphinx_VERSION_PATCH EQUAL 0) + string (REGEX REPLACE "\\.0$" "" Sphinx_VERSION_STRING "${Sphinx_VERSION_STRING}") + endif () +endif () + +# ---------------------------------------------------------------------------- +# compatibility with FindPythonInterp.cmake and FindPerl.cmake +set (SPHINX_EXECUTABLE "${Sphinx-build_EXECUTABLE}") + +# ---------------------------------------------------------------------------- +# handle the QUIETLY and REQUIRED arguments and set SPHINX_FOUND to TRUE if +# all listed variables are TRUE +include (FindPackageHandleStandardArgs) +FIND_PACKAGE_HANDLE_STANDARD_ARGS ( + Sphinx + REQUIRED_VARS + ${_Sphinx_REQUIRED_VARS} +# VERSION_VAR # This isn't available until CMake 2.8.8 so don't use it. + Sphinx_VERSION_STRING +) + +# ---------------------------------------------------------------------------- +# set Sphinx_DIR +if (NOT Sphinx_DIR AND Sphinx-build_EXECUTABLE) + get_filename_component (Sphinx_DIR "${Sphinx-build_EXECUTABLE}" PATH) + string (REGEX REPLACE "/bin/?" 
"" Sphinx_DIR "${Sphinx_DIR}") + set (Sphinx_DIR "${Sphinx_DIR}" CACHE PATH "Installation directory of Sphinx tools." FORCE) +endif () + +unset (_Sphinx_VERSION) +unset (_Sphinx_REQUIRED_VARS) \ No newline at end of file diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in b/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in new file mode 100644 index 0000000..abd6793 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in @@ -0,0 +1,4 @@ +@PACKAGE_INIT@ + +include("${CMAKE_CURRENT_LIST_DIR}/janssonTargets.cmake") +check_required_components("@PROJECT_NAME@") diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake new file mode 100644 index 0000000..2f248cb --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2010-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + * + * + * This file specifies a part of the site-specific configuration for + * Jansson, namely those things that affect the public API in + * jansson.h. + * + * The CMake system will generate the jansson_config.h file and + * copy it to the build and install directories. + */ + +#ifndef JANSSON_CONFIG_H +#define JANSSON_CONFIG_H + +/* Define this so that we can disable scattered automake configuration in source files */ +#ifndef JANSSON_USING_CMAKE +#define JANSSON_USING_CMAKE +#endif + +/* Note: when using cmake, JSON_INTEGER_IS_LONG_LONG is not defined nor used, + * as we will also check for __int64 etc types. 
+ * (the definition was used in the automake system) */ + +/* Bring in the cmake-detected defines */ +#cmakedefine HAVE_STDINT_H 1 +#cmakedefine HAVE_INTTYPES_H 1 +#cmakedefine HAVE_SYS_TYPES_H 1 + +/* Include our standard type header for the integer typedef */ + +#if defined(HAVE_STDINT_H) +# include +#elif defined(HAVE_INTTYPES_H) +# include +#elif defined(HAVE_SYS_TYPES_H) +# include +#endif + + +/* If your compiler supports the inline keyword in C, JSON_INLINE is + defined to `inline', otherwise empty. In C++, the inline is always + supported. */ +#ifdef __cplusplus +#define JSON_INLINE inline +#else +#define JSON_INLINE @JSON_INLINE@ +#endif + + +#define json_int_t @JSON_INT_T@ +#define json_strtoint @JSON_STRTOINT@ +#define JSON_INTEGER_FORMAT @JSON_INTEGER_FORMAT@ + + +/* If locale.h and localeconv() are available, define to 1, otherwise to 0. */ +#define JSON_HAVE_LOCALECONV @JSON_HAVE_LOCALECONV@ + +/* If __atomic builtins are available they will be used to manage + reference counts of json_t. */ +#define JSON_HAVE_ATOMIC_BUILTINS @JSON_HAVE_ATOMIC_BUILTINS@ + +/* If __atomic builtins are not available we try using __sync builtins + to manage reference counts of json_t. */ +#define JSON_HAVE_SYNC_BUILTINS @JSON_HAVE_SYNC_BUILTINS@ + +/* Maximum recursion depth for parsing JSON input. + This limits the depth of e.g. array-within-array constructions. 
*/ +#define JSON_PARSER_MAX_DEPTH 2048 + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake new file mode 100644 index 0000000..b7c4514 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake @@ -0,0 +1,53 @@ +#cmakedefine HAVE_ENDIAN_H 1 +#cmakedefine HAVE_FCNTL_H 1 +#cmakedefine HAVE_SCHED_H 1 +#cmakedefine HAVE_UNISTD_H 1 +#cmakedefine HAVE_SYS_PARAM_H 1 +#cmakedefine HAVE_SYS_STAT_H 1 +#cmakedefine HAVE_SYS_TIME_H 1 +#cmakedefine HAVE_SYS_TYPES_H 1 +#cmakedefine HAVE_STDINT_H 1 + +#cmakedefine HAVE_CLOSE 1 +#cmakedefine HAVE_GETPID 1 +#cmakedefine HAVE_GETTIMEOFDAY 1 +#cmakedefine HAVE_OPEN 1 +#cmakedefine HAVE_READ 1 +#cmakedefine HAVE_SCHED_YIELD 1 + +#cmakedefine HAVE_SYNC_BUILTINS 1 +#cmakedefine HAVE_ATOMIC_BUILTINS 1 + +#cmakedefine HAVE_LOCALE_H 1 +#cmakedefine HAVE_SETLOCALE 1 + +#cmakedefine HAVE_INT32_T 1 +#ifndef HAVE_INT32_T +# define int32_t @JSON_INT32@ +#endif + +#cmakedefine HAVE_UINT32_T 1 +#ifndef HAVE_UINT32_T +# define uint32_t @JSON_UINT32@ +#endif + +#cmakedefine HAVE_UINT16_T 1 +#ifndef HAVE_UINT16_T +# define uint16_t @JSON_UINT16@ +#endif + +#cmakedefine HAVE_UINT8_T 1 +#ifndef HAVE_UINT8_T +# define uint8_t @JSON_UINT8@ +#endif + +#cmakedefine HAVE_SSIZE_T 1 + +#ifndef HAVE_SSIZE_T +# define ssize_t @JSON_SSIZE@ +#endif + +#cmakedefine USE_URANDOM 1 +#cmakedefine USE_WINDOWS_CRYPTOAPI 1 + +#define INITIAL_HASHTABLE_ORDER @JANSSON_INITIAL_HASHTABLE_ORDER@ diff --git a/solo-ckpool-source/src/jansson-2.14/configure.ac b/solo-ckpool-source/src/jansson-2.14/configure.ac new file mode 100644 index 0000000..bcca185 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/configure.ac @@ -0,0 +1,168 @@ +AC_PREREQ([2.71]) +AC_INIT([jansson],[2.14],[https://github.com/akheron/jansson/issues]) + +AC_CONFIG_AUX_DIR([.]) +AM_INIT_AUTOMAKE([1.10 foreign]) + +AC_CONFIG_SRCDIR([src/value.c]) 
+AC_CONFIG_HEADERS([jansson_private_config.h]) +AC_CONFIG_MACRO_DIRS([m4]) + +# Checks for programs. +AC_PROG_CC +AC_PROG_CXX +LT_INIT +AM_CONDITIONAL([GCC], [test x$GCC = xyes]) + +# Checks for libraries. + +# Checks for header files. +AC_CHECK_HEADERS([endian.h fcntl.h locale.h sched.h unistd.h sys/param.h sys/stat.h sys/time.h sys/types.h]) + +# Checks for typedefs, structures, and compiler characteristics. +AC_TYPE_INT32_T +AC_TYPE_UINT32_T +AC_TYPE_UINT16_T +AC_TYPE_UINT8_T +AC_TYPE_LONG_LONG_INT + +AC_C_INLINE +case $ac_cv_c_inline in + yes) json_inline=inline;; + no) json_inline=;; + *) json_inline=$ac_cv_c_inline;; +esac +AC_SUBST([json_inline]) + +# Checks for library functions. +AC_CHECK_FUNCS([close getpid gettimeofday localeconv open read sched_yield strtoll]) + +AC_MSG_CHECKING([for gcc __sync builtins]) +have_sync_builtins=no +AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[unsigned long val; __sync_bool_compare_and_swap(&val, 0, 1); __sync_add_and_fetch(&val, 1); __sync_sub_and_fetch(&val, 1);]])],[have_sync_builtins=yes],[]) +if test "x$have_sync_builtins" = "xyes"; then + AC_DEFINE([HAVE_SYNC_BUILTINS], [1], + [Define to 1 if gcc's __sync builtins are available]) + json_have_sync_builtins=1 +else + json_have_sync_builtins=0 +fi +AC_SUBST([json_have_sync_builtins]) +AC_MSG_RESULT([$have_sync_builtins]) + +AC_MSG_CHECKING([for gcc __atomic builtins]) +have_atomic_builtins=no +AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[char l; unsigned long v; __atomic_test_and_set(&l, __ATOMIC_RELAXED); __atomic_store_n(&v, 1, __ATOMIC_RELEASE); __atomic_load_n(&v, __ATOMIC_ACQUIRE); __atomic_add_fetch(&v, 1, __ATOMIC_ACQUIRE); __atomic_sub_fetch(&v, 1, __ATOMIC_RELEASE);]])],[have_atomic_builtins=yes],[]) +if test "x$have_atomic_builtins" = "xyes"; then + AC_DEFINE([HAVE_ATOMIC_BUILTINS], [1], + [Define to 1 if gcc's __atomic builtins are available]) + json_have_atomic_builtins=1 +else + json_have_atomic_builtins=0 +fi +AC_SUBST([json_have_atomic_builtins]) 
+AC_MSG_RESULT([$have_atomic_builtins]) + +case "$ac_cv_type_long_long_int$ac_cv_func_strtoll" in + yesyes) json_have_long_long=1;; + *) json_have_long_long=0;; +esac +AC_SUBST([json_have_long_long]) + +case "$ac_cv_header_locale_h$ac_cv_func_localeconv" in + yesyes) json_have_localeconv=1;; + *) json_have_localeconv=0;; +esac +AC_SUBST([json_have_localeconv]) + +# Features +AC_ARG_ENABLE([urandom], + [AS_HELP_STRING([--disable-urandom], + [Don't use /dev/urandom to seed the hash function])], + [use_urandom=$enableval], [use_urandom=yes]) + +if test "x$use_urandom" = xyes; then +AC_DEFINE([USE_URANDOM], [1], + [Define to 1 if /dev/urandom should be used for seeding the hash function]) +fi + +AC_ARG_ENABLE([windows-cryptoapi], + [AS_HELP_STRING([--disable-windows-cryptoapi], + [Don't use CryptGenRandom to seed the hash function])], + [use_windows_cryptoapi=$enableval], [use_windows_cryptoapi=yes]) + +if test "x$use_windows_cryptoapi" = xyes; then +AC_DEFINE([USE_WINDOWS_CRYPTOAPI], [1], + [Define to 1 if CryptGenRandom should be used for seeding the hash function]) +fi + +AC_ARG_ENABLE([initial-hashtable-order], + [AS_HELP_STRING([--enable-initial-hashtable-order=VAL], + [Number of buckets new object hashtables contain is 2 raised to this power. The default is 3, so empty hashtables contain 2^3 = 8 buckets.])], + [initial_hashtable_order=$enableval], [initial_hashtable_order=3]) +AC_DEFINE_UNQUOTED([INITIAL_HASHTABLE_ORDER], [$initial_hashtable_order], + [Number of buckets new object hashtables contain is 2 raised to this power. E.g. 
3 -> 2^3 = 8.]) + +AC_ARG_ENABLE([Bsymbolic], + [AS_HELP_STRING([--disable-Bsymbolic], + [Avoid linking with -Bsymbolic-function])], + [], [with_Bsymbolic=check]) + +if test "x$with_Bsymbolic" != "xno" ; then + AC_MSG_CHECKING([for -Bsymbolic-functions linker flag]) + saved_LDFLAGS="${LDFLAGS}" + LDFLAGS=-Wl,-Bsymbolic-functions + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[int main (void) { return 0; }]])],[AC_MSG_RESULT([yes]) + have_Bsymbolic=yes],[AC_MSG_RESULT([no]) + have_Bsymbolic=no + ]) + LDFLAGS="${saved_LDFLAGS}" + + if test "x$with_Bsymbolic" = "xcheck" ; then + with_Bsymbolic=$have_Bsymbolic; + fi + if test "x$with_Bsymbolic:x$have_Bsymbolic" = "xyes:xno" ; then + AC_MSG_ERROR([linker support is required for -Bsymbolic]) + fi +fi + +AS_IF([test "x$with_Bsymbolic" = "xyes"], [JSON_BSYMBOLIC_LDFLAGS=-Wl[,]-Bsymbolic-functions]) +AC_SUBST(JSON_BSYMBOLIC_LDFLAGS) + +# Enable symbol versioning on GNU libc +JSON_SYMVER_LDFLAGS= +AC_CHECK_DECL([__GLIBC__], [JSON_SYMVER_LDFLAGS=-Wl,--default-symver]) +AC_SUBST([JSON_SYMVER_LDFLAGS]) + +AC_ARG_ENABLE([ossfuzzers], + [AS_HELP_STRING([--enable-ossfuzzers], + [Whether to generate the fuzzers for OSS-Fuzz])], + [have_ossfuzzers=yes], [have_ossfuzzers=no]) +AM_CONDITIONAL([USE_OSSFUZZERS], [test "x$have_ossfuzzers" = "xyes"]) + + +AC_SUBST([LIB_FUZZING_ENGINE]) +AM_CONDITIONAL([USE_OSSFUZZ_FLAG], [test "x$LIB_FUZZING_ENGINE" = "x-fsanitize=fuzzer"]) +AM_CONDITIONAL([USE_OSSFUZZ_STATIC], [test -f "$LIB_FUZZING_ENGINE"]) + + +if test x$GCC = xyes; then + AC_MSG_CHECKING(for -Wno-format-truncation) + wnoformat_truncation="-Wno-format-truncation" + AS_IF([${CC} -Wno-format-truncation -Werror -S -o /dev/null -xc /dev/null > /dev/null 2>&1], + [AC_MSG_RESULT(yes)], + [AC_MSG_RESULT(no) + wnoformat_truncation=""]) + + AM_CFLAGS="-Wall -Wextra -Wdeclaration-after-statement -Wshadow ${wnoformat_truncation}" +fi +AC_SUBST([AM_CFLAGS]) + +AC_CONFIG_FILES([ + jansson.pc + Makefile + doc/Makefile + src/Makefile + 
src/jansson_config.h +]) +AC_OUTPUT diff --git a/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am b/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am new file mode 100644 index 0000000..8186a7d --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am @@ -0,0 +1,20 @@ +EXTRA_DIST = conf.py apiref.rst changes.rst conformance.rst \ + gettingstarted.rst github_commits.c index.rst threadsafety.rst \ + tutorial.rst upgrading.rst ext/refcounting.py + +SPHINXBUILD = sphinx-build +SPHINXOPTS = -d _build/doctrees $(SPHINXOPTS_EXTRA) + +html-local: + $(SPHINXBUILD) -b html $(SPHINXOPTS) $(srcdir) _build/html + +install-html-local: html + mkdir -p $(DESTDIR)$(htmldir) + cp -r _build/html $(DESTDIR)$(htmldir) + +uninstall-local: + rm -rf $(DESTDIR)$(htmldir) + +clean-local: + rm -rf _build + rm -f ext/refcounting.pyc diff --git a/solo-ckpool-source/src/jansson-2.14/doc/README b/solo-ckpool-source/src/jansson-2.14/doc/README new file mode 100644 index 0000000..930b3bf --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/README @@ -0,0 +1,5 @@ +To build the documentation, invoke + + make html + +Then point your browser to _build/html/index.html. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst b/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst new file mode 100644 index 0000000..4bfb687 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst @@ -0,0 +1,2064 @@ +.. _apiref: + +************* +API Reference +************* + +.. highlight:: c + +Preliminaries +============= + +All declarations are in :file:`jansson.h`, so it's enough to + +:: + + #include <jansson.h> + +in each source file. + +All constants are prefixed with ``JSON_`` (except for those describing +the library version, prefixed with ``JANSSON_``). Other identifiers +are prefixed with ``json_``. Type names are suffixed with ``_t`` and +``typedef``\ 'd so that the ``struct`` keyword need not be used. 
+ + +Library Version +=============== + +The Jansson version is of the form *A.B.C*, where *A* is the major +version, *B* is the minor version and *C* is the micro version. If the +micro version is zero, it's omitted from the version string, i.e. the +version string is just *A.B*. + +When a new release only fixes bugs and doesn't add new features or +functionality, the micro version is incremented. When new features are +added in a backwards compatible way, the minor version is incremented +and the micro version is set to zero. When there are backwards +incompatible changes, the major version is incremented and others are +set to zero. + +The following preprocessor constants specify the current version of +the library: + +``JANSSON_MAJOR_VERSION``, ``JANSSON_MINOR_VERSION``, ``JANSSON_MICRO_VERSION`` + Integers specifying the major, minor and micro versions, + respectively. + +``JANSSON_VERSION`` + A string representation of the current version, e.g. ``"1.2.1"`` or + ``"1.3"``. + +``JANSSON_VERSION_HEX`` + A 3-byte hexadecimal representation of the version, e.g. + ``0x010201`` for version 1.2.1 and ``0x010300`` for version 1.3. + This is useful in numeric comparisons, e.g.:: + + #if JANSSON_VERSION_HEX >= 0x010300 + /* Code specific to version 1.3 and above */ + #endif + +Additionally, there are functions to determine the version of Jansson at +runtime: + +.. function:: const char *jansson_version_str() + + Return the version of the Jansson library, in the same format as + the ``JANSSON_VERSION`` preprocessor constant. + + .. versionadded:: 2.13 + +.. function:: int jansson_version_cmp(int major, int minor, int micro) + + Returns an integer less than, equal to, or greater than zero if + the runtime version of Jansson is found, respectively, to be less + than, to match, or be greater than the provided *major*, *minor*, and + *micro*. + + .. 
versionadded:: 2.13 + +``JANSSON_THREAD_SAFE_REFCOUNT`` + If this value is defined all read-only operations and reference counting in + Jansson are thread safe. This value is not defined for versions older than + ``2.11`` or when the compiler does not provide built-in atomic functions. + + +Value Representation +==================== + +The JSON specification (:rfc:`4627`) defines the following data types: +*object*, *array*, *string*, *number*, *boolean*, and *null*. JSON +types are used dynamically; arrays and objects can hold any other data +type, including themselves. For this reason, Jansson's type system is +also dynamic in nature. There's one C type to represent all JSON +values, and this structure knows the type of the JSON value it holds. + +.. type:: json_t + + This data structure is used throughout the library to represent all + JSON values. It always contains the type of the JSON value it holds + and the value's reference count. The rest depends on the type of the + value. + +Objects of :type:`json_t` are always used through a pointer. There +are APIs for querying the type, manipulating the reference count, and +for constructing and manipulating values of different types. + +Unless noted otherwise, all API functions return an error value if an +error occurs. Depending on the function's signature, the error value +is either *NULL* or -1. Invalid arguments or invalid input are +apparent sources for errors. Memory allocation and I/O operations may +also cause errors. + + +Type +---- + +.. c:enum:: json_type + + The type of a JSON value. 
The following members are defined: + + +--------------------+ + | ``JSON_OBJECT`` | + +--------------------+ + | ``JSON_ARRAY`` | + +--------------------+ + | ``JSON_STRING`` | + +--------------------+ + | ``JSON_INTEGER`` | + +--------------------+ + | ``JSON_REAL`` | + +--------------------+ + | ``JSON_TRUE`` | + +--------------------+ + | ``JSON_FALSE`` | + +--------------------+ + | ``JSON_NULL`` | + +--------------------+ + + These correspond to JSON object, array, string, number, boolean and + null. A number is represented by either a value of the type + ``JSON_INTEGER`` or of the type ``JSON_REAL``. A true boolean value + is represented by a value of the type ``JSON_TRUE`` and false by a + value of the type ``JSON_FALSE``. + +.. function:: int json_typeof(const json_t *json) + + Return the type of the JSON value (a :type:`json_type` cast to + ``int``). *json* MUST NOT be *NULL*. This function is actually + implemented as a macro for speed. + +.. function:: int json_is_object(const json_t *json) + int json_is_array(const json_t *json) + int json_is_string(const json_t *json) + int json_is_integer(const json_t *json) + int json_is_real(const json_t *json) + int json_is_true(const json_t *json) + int json_is_false(const json_t *json) + int json_is_null(const json_t *json) + + These functions (actually macros) return true (non-zero) for values + of the given type, and false (zero) for values of other types and + for *NULL*. + +.. function:: int json_is_number(const json_t *json) + + Returns true for values of types ``JSON_INTEGER`` and + ``JSON_REAL``, and false for other types and for *NULL*. + +.. function:: int json_is_boolean(const json_t *json) + + Returns true for types ``JSON_TRUE`` and ``JSON_FALSE``, and false + for values of other types and for *NULL*. + +.. function:: int json_boolean_value(const json_t *json) + + Alias of :func:`json_is_true()`, i.e. returns 1 for ``JSON_TRUE`` + and 0 otherwise. + + .. versionadded:: 2.7 + + +.. 
_apiref-reference-count: + +Reference Count +--------------- + +The reference count is used to track whether a value is still in use +or not. When a value is created, its reference count is set to 1. If +a reference to a value is kept (e.g. a value is stored somewhere for +later use), its reference count is incremented, and when the value is +no longer needed, the reference count is decremented. When the +reference count drops to zero, there are no references left, and the +value can be destroyed. + +.. function:: json_t *json_incref(json_t *json) + + Increment the reference count of *json* if it's not *NULL*. + Returns *json*. + +.. function:: void json_decref(json_t *json) + + Decrement the reference count of *json*. As soon as a call to + :func:`json_decref()` drops the reference count to zero, the value + is destroyed and it can no longer be used. + +Functions creating new JSON values set the reference count to 1. These +functions are said to return a **new reference**. Other functions +returning (existing) JSON values do not normally increase the +reference count. These functions are said to return a **borrowed +reference**. So, if the user will hold a reference to a value returned +as a borrowed reference, he must call :func:`json_incref`. As soon as +the value is no longer needed, :func:`json_decref` should be called +to release the reference. + +Normally, all functions accepting a JSON value as an argument will +manage the reference, i.e. increase and decrease the reference count +as needed. However, some functions **steal** the reference, i.e. they +have the same result as if the user called :func:`json_decref()` on +the argument right after calling the function. These functions are +suffixed with ``_new`` or have ``_new_`` somewhere in their name. 
+ +For example, the following code creates a new JSON array and appends +an integer to it:: + + json_t *array, *integer; + + array = json_array(); + integer = json_integer(42); + + json_array_append(array, integer); + json_decref(integer); + +Note how the caller has to release the reference to the integer value +by calling :func:`json_decref()`. By using a reference stealing +function :func:`json_array_append_new()` instead of +:func:`json_array_append()`, the code becomes much simpler:: + + json_t *array = json_array(); + json_array_append_new(array, json_integer(42)); + +In this case, the user doesn't have to explicitly release the +reference to the integer value, as :func:`json_array_append_new()` +steals the reference when appending the value to the array. + +In the following sections it is clearly documented whether a function +will return a new or borrowed reference or steal a reference to its +argument. + + +Circular References +------------------- + +A circular reference is created when an object or an array is, +directly or indirectly, inserted inside itself. The direct case is +simple:: + + json_t *obj = json_object(); + json_object_set(obj, "foo", obj); + +Jansson will refuse to do this, and :func:`json_object_set()` (and +all the other such functions for objects and arrays) will return with +an error status. The indirect case is the dangerous one:: + + json_t *arr1 = json_array(), *arr2 = json_array(); + json_array_append(arr1, arr2); + json_array_append(arr2, arr1); + +In this example, the array ``arr2`` is contained in the array +``arr1``, and vice versa. Jansson cannot check for this kind of +indirect circular references without a performance hit, so it's up to +the user to avoid them. + +If a circular reference is created, the memory consumed by the values +cannot be freed by :func:`json_decref()`. The reference counts never +drops to zero because the values are keeping the references to each +other. 
Moreover, trying to encode the values with any of the encoding +functions will fail. The encoder detects circular references and +returns an error status. + +Scope Dereferencing +------------------- + +.. versionadded:: 2.9 + +It is possible to use the ``json_auto_t`` type to automatically +dereference a value at the end of a scope. For example:: + + void function(void) { + json_auto_t *value = NULL; + value = json_string("foo"); + /* json_decref(value) is automatically called. */ + } + +This feature is only available on GCC and Clang. So if your project +has a portability requirement for other compilers, you should avoid +this feature. + +Additionally, as always, care should be taken when passing values to +functions that steal references. + +True, False and Null +==================== + +These three values are implemented as singletons, so the returned +pointers won't change between invocations of these functions. + +.. function:: json_t *json_true(void) + + .. refcounting:: new + + Returns the JSON true value. + +.. function:: json_t *json_false(void) + + .. refcounting:: new + + Returns the JSON false value. + +.. function:: json_t *json_boolean(val) + + .. refcounting:: new + + Returns JSON false if ``val`` is zero, and JSON true otherwise. + This is a macro, and equivalent to ``val ? json_true() : + json_false()``. + + .. versionadded:: 2.4 + + +.. function:: json_t *json_null(void) + + .. refcounting:: new + + Returns the JSON null value. + + +String +====== + +Jansson uses UTF-8 as the character encoding. All JSON strings must be +valid UTF-8 (or ASCII, as it's a subset of UTF-8). All Unicode +codepoints U+0000 through U+10FFFF are allowed, but you must use +length-aware functions if you wish to embed null bytes in strings. + +.. function:: json_t *json_string(const char *value) + + .. refcounting:: new + + Returns a new JSON string, or *NULL* on error. *value* must be a + valid null terminated UTF-8 encoded Unicode string. + +.. 
function:: json_t *json_stringn(const char *value, size_t len) + + .. refcounting:: new + + Like :func:`json_string`, but with explicit length, so *value* may + contain null characters or not be null terminated. + + .. versionadded:: 2.7 + +.. function:: json_t *json_string_nocheck(const char *value) + + .. refcounting:: new + + Like :func:`json_string`, but doesn't check that *value* is valid + UTF-8. Use this function only if you are certain that this really + is the case (e.g. you have already checked it by other means). + +.. function:: json_t *json_stringn_nocheck(const char *value, size_t len) + + .. refcounting:: new + + Like :func:`json_string_nocheck`, but with explicit length, so + *value* may contain null characters or not be null terminated. + + .. versionadded:: 2.7 + +.. function:: const char *json_string_value(const json_t *string) + + Returns the associated value of *string* as a null terminated UTF-8 + encoded string, or *NULL* if *string* is not a JSON string. + + The returned value is read-only and must not be modified or freed by + the user. It is valid as long as *string* exists, i.e. as long as + its reference count has not dropped to zero. + +.. function:: size_t json_string_length(const json_t *string) + + Returns the length of *string* in its UTF-8 presentation, or zero + if *string* is not a JSON string. + + .. versionadded:: 2.7 + +.. function:: int json_string_set(json_t *string, const char *value) + + Sets the associated value of *string* to *value*. *value* must be a + valid UTF-8 encoded Unicode string. Returns 0 on success and -1 on + error. + +.. function:: int json_string_setn(json_t *string, const char *value, size_t len) + + Like :func:`json_string_set`, but with explicit length, so *value* + may contain null characters or not be null terminated. + + .. versionadded:: 2.7 + +.. 
function:: int json_string_set_nocheck(json_t *string, const char *value) + + Like :func:`json_string_set`, but doesn't check that *value* is + valid UTF-8. Use this function only if you are certain that this + really is the case (e.g. you have already checked it by other + means). + +.. function:: int json_string_setn_nocheck(json_t *string, const char *value, size_t len) + + Like :func:`json_string_set_nocheck`, but with explicit length, + so *value* may contain null characters or not be null terminated. + + .. versionadded:: 2.7 + +.. function:: json_t *json_sprintf(const char *format, ...) + json_t *json_vsprintf(const char *format, va_list ap) + + .. refcounting:: new + + Construct a JSON string from a format string and varargs, just like + :func:`printf()`. + + .. versionadded:: 2.11 + + +Number +====== + +The JSON specification only contains one numeric type, "number". The C +programming language has distinct types for integer and floating-point +numbers, so for practical reasons Jansson also has distinct types for +the two. They are called "integer" and "real", respectively. For more +information, see :ref:`rfc-conformance`. + +.. type:: json_int_t + + This is the C type that is used to store JSON integer values. It + represents the widest integer type available on your system. In + practice it's just a typedef of ``long long`` if your compiler + supports it, otherwise ``long``. + + Usually, you can safely use plain ``int`` in place of + ``json_int_t``, and the implicit C integer conversion handles the + rest. Only when you know that you need the full 64-bit range, you + should use ``json_int_t`` explicitly. + +``JSON_INTEGER_IS_LONG_LONG`` + This is a preprocessor variable that holds the value 1 if + :type:`json_int_t` is ``long long``, and 0 if it's ``long``. 
It + can be used as follows:: + + #if JSON_INTEGER_IS_LONG_LONG + /* Code specific for long long */ + #else + /* Code specific for long */ + #endif + +``JSON_INTEGER_FORMAT`` + This is a macro that expands to a :func:`printf()` conversion + specifier that corresponds to :type:`json_int_t`, without the + leading ``%`` sign, i.e. either ``"lld"`` or ``"ld"``. This macro + is required because the actual type of :type:`json_int_t` can be + either ``long`` or ``long long``, and :func:`printf()` requires + different length modifiers for the two. + + Example:: + + json_int_t x = 123123123; + printf("x is %" JSON_INTEGER_FORMAT "\n", x); + + +.. function:: json_t *json_integer(json_int_t value) + + .. refcounting:: new + + Returns a new JSON integer, or *NULL* on error. + +.. function:: json_int_t json_integer_value(const json_t *integer) + + Returns the associated value of *integer*, or 0 if *json* is not a + JSON integer. + +.. function:: int json_integer_set(const json_t *integer, json_int_t value) + + Sets the associated value of *integer* to *value*. Returns 0 on + success and -1 if *integer* is not a JSON integer. + +.. function:: json_t *json_real(double value) + + .. refcounting:: new + + Returns a new JSON real, or *NULL* on error. + +.. function:: double json_real_value(const json_t *real) + + Returns the associated value of *real*, or 0.0 if *real* is not a + JSON real. + +.. function:: int json_real_set(const json_t *real, double value) + + Sets the associated value of *real* to *value*. Returns 0 on + success and -1 if *real* is not a JSON real. + +.. function:: double json_number_value(const json_t *json) + + Returns the associated value of the JSON integer or JSON real + *json*, cast to double regardless of the actual type. If *json* is + neither JSON real nor JSON integer, 0.0 is returned. + + +Array +===== + +A JSON array is an ordered collection of other JSON values. + +.. function:: json_t *json_array(void) + + .. 
refcounting:: new + + Returns a new JSON array, or *NULL* on error. Initially, the array + is empty. + +.. function:: size_t json_array_size(const json_t *array) + + Returns the number of elements in *array*, or 0 if *array* is NULL + or not a JSON array. + +.. function:: json_t *json_array_get(const json_t *array, size_t index) + + .. refcounting:: borrow + + Returns the element in *array* at position *index*. The valid range + for *index* is from 0 to the return value of + :func:`json_array_size()` minus 1. If *array* is not a JSON array, + if *array* is *NULL*, or if *index* is out of range, *NULL* is + returned. + +.. function:: int json_array_set(json_t *array, size_t index, json_t *value) + + Replaces the element in *array* at position *index* with *value*. + The valid range for *index* is from 0 to the return value of + :func:`json_array_size()` minus 1. Returns 0 on success and -1 on + error. + +.. function:: int json_array_set_new(json_t *array, size_t index, json_t *value) + + Like :func:`json_array_set()` but steals the reference to *value*. + This is useful when *value* is newly created and not used after + the call. + +.. function:: int json_array_append(json_t *array, json_t *value) + + Appends *value* to the end of *array*, growing the size of *array* + by 1. Returns 0 on success and -1 on error. + +.. function:: int json_array_append_new(json_t *array, json_t *value) + + Like :func:`json_array_append()` but steals the reference to + *value*. This is useful when *value* is newly created and not used + after the call. + +.. function:: int json_array_insert(json_t *array, size_t index, json_t *value) + + Inserts *value* to *array* at position *index*, shifting the + elements at *index* and after it one position towards the end of + the array. Returns 0 on success and -1 on error. + +.. function:: int json_array_insert_new(json_t *array, size_t index, json_t *value) + + Like :func:`json_array_insert()` but steals the reference to + *value*. 
This is useful when *value* is newly created and not used + after the call. + +.. function:: int json_array_remove(json_t *array, size_t index) + + Removes the element in *array* at position *index*, shifting the + elements after *index* one position towards the start of the array. + Returns 0 on success and -1 on error. The reference count of the + removed value is decremented. + +.. function:: int json_array_clear(json_t *array) + + Removes all elements from *array*. Returns 0 on success and -1 on + error. The reference count of all removed values are decremented. + +.. function:: int json_array_extend(json_t *array, json_t *other_array) + + Appends all elements in *other_array* to the end of *array*. + Returns 0 on success and -1 on error. + +.. function:: void json_array_foreach(array, index, value) + + Iterate over every element of ``array``, running the block + of code that follows each time with the proper values set to + variables ``index`` and ``value``, of types :type:`size_t` and + :type:`json_t` pointer respectively. Example:: + + /* array is a JSON array */ + size_t index; + json_t *value; + + json_array_foreach(array, index, value) { + /* block of code that uses index and value */ + } + + The items are returned in increasing index order. + + This macro expands to an ordinary ``for`` statement upon + preprocessing, so its performance is equivalent to that of + hand-written code using the array access functions. + The main advantage of this macro is that it abstracts + away the complexity, and makes for more concise and readable code. + + .. versionadded:: 2.5 + + +Object +====== + +A JSON object is a dictionary of key-value pairs, where the key is a +Unicode string and the value is any JSON value. + +Even though null bytes are allowed in string values, they are not +allowed in object keys. + +.. function:: json_t *json_object(void) + + .. refcounting:: new + + Returns a new JSON object, or *NULL* on error. Initially, the + object is empty. + +.. 
function:: size_t json_object_size(const json_t *object) + + Returns the number of elements in *object*, or 0 if *object* is not + a JSON object. + +.. function:: json_t *json_object_get(const json_t *object, const char *key) + + .. refcounting:: borrow + + Get a value corresponding to *key* from *object*. Returns *NULL* if + *key* is not found and on error. + +.. function:: json_t *json_object_getn(const json_t *object, const char *key, size_t key_len) + + .. refcounting:: borrow + + Like :func:`json_object_get`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_set(json_t *object, const char *key, json_t *value) + + Set the value of *key* to *value* in *object*. *key* must be a + valid null terminated UTF-8 encoded Unicode string. If there + already is a value for *key*, it is replaced by the new value. + Returns 0 on success and -1 on error. + +.. function:: int json_object_setn(json_t *object, const char *key, size_t key_len, json_t *value) + + Like :func:`json_object_set`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_set_nocheck(json_t *object, const char *key, json_t *value) + + Like :func:`json_object_set`, but doesn't check that *key* is + valid UTF-8. Use this function only if you are certain that this + really is the case (e.g. you have already checked it by other + means). + +.. function:: int json_object_setn_nocheck(json_t *object, const char *key, size_t key_len, json_t *value) + + Like :func:`json_object_set_nocheck`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_set_new(json_t *object, const char *key, json_t *value) + + Like :func:`json_object_set()` but steals the reference to + *value*. 
This is useful when *value* is newly created and not used + after the call. + +.. function:: int json_object_setn_new(json_t *object, const char *key, size_t key_len, json_t *value) + + Like :func:`json_object_set_new`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value) + + Like :func:`json_object_set_new`, but doesn't check that *key* is + valid UTF-8. Use this function only if you are certain that this + really is the case (e.g. you have already checked it by other + means). + +.. function:: int json_object_setn_new_nocheck(json_t *object, const char *key, size_t key_len, json_t *value) + + Like :func:`json_object_set_new_nocheck`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_del(json_t *object, const char *key) + + Delete *key* from *object* if it exists. Returns 0 on success, or + -1 if *key* was not found. The reference count of the removed value + is decremented. + +.. function:: int json_object_deln(json_t *object, const char *key, size_t key_len) + + Like :func:`json_object_del`, but give the fixed-length *key* with length *key_len*. + See :ref:`fixed_length_keys` for details. + + .. versionadded:: 2.14 + +.. function:: int json_object_clear(json_t *object) + + Remove all elements from *object*. Returns 0 on success and -1 if + *object* is not a JSON object. The reference count of all removed + values are decremented. + +.. function:: int json_object_update(json_t *object, json_t *other) + + Update *object* with the key-value pairs from *other*, overwriting + existing keys. Returns 0 on success or -1 on error. + +.. 
function:: int json_object_update_existing(json_t *object, json_t *other) + + Like :func:`json_object_update()`, but only the values of existing + keys are updated. No new keys are created. Returns 0 on success or + -1 on error. + + .. versionadded:: 2.3 + +.. function:: int json_object_update_missing(json_t *object, json_t *other) + + Like :func:`json_object_update()`, but only new keys are created. + The value of any existing key is not changed. Returns 0 on success + or -1 on error. + + .. versionadded:: 2.3 + +.. function:: int json_object_update_new(json_t *object, json_t *other) + + Like :func:`json_object_update()`, but steals the reference to + *other*. This is useful when *other* is newly created and not used + after the call. + +.. function:: int json_object_update_existing_new(json_t *object, json_t *other) + + Like :func:`json_object_update_new()`, but only the values of existing + keys are updated. No new keys are created. Returns 0 on success or + -1 on error. + +.. function:: int json_object_update_missing_new(json_t *object, json_t *other) + + Like :func:`json_object_update_new()`, but only new keys are created. + The value of any existing key is not changed. Returns 0 on success + or -1 on error. + +.. function:: int json_object_update_recursive(json_t *object, json_t *other) + + Like :func:`json_object_update()`, but object values in *other* are + recursively merged with the corresponding values in *object* if they are also + objects, instead of overwriting them. Returns 0 on success or -1 on error. + +.. function:: void json_object_foreach(object, key, value) + + Iterate over every key-value pair of ``object``, running the block + of code that follows each time with the proper values set to + variables ``key`` and ``value``, of types ``const char *`` and + :type:`json_t` pointer respectively. 
Example:: + + /* obj is a JSON object */ + const char *key; + json_t *value; + + json_object_foreach(obj, key, value) { + /* block of code that uses key and value */ + } + + The items are returned in the order they were inserted to the + object. + + **Note:** It's not safe to call ``json_object_del(object, key)`` or ``json_object_deln(object, key, key_len)`` + during iteration. If you need to, use + :func:`json_object_foreach_safe` instead. + + This macro expands to an ordinary ``for`` statement upon + preprocessing, so its performance is equivalent to that of + hand-written iteration code using the object iteration protocol + (see below). The main advantage of this macro is that it abstracts + away the complexity behind iteration, and makes for more concise and + readable code. + + .. versionadded:: 2.3 + + +.. function:: void json_object_foreach_safe(object, tmp, key, value) + + Like :func:`json_object_foreach()`, but it's safe to call + ``json_object_del(object, key)`` or ``json_object_deln(object, key, key_len)`` during iteration. + You need to pass an extra ``void *`` parameter ``tmp`` that is used for temporary storage. + + .. versionadded:: 2.8 + +.. function:: void json_object_keylen_foreach(object, key, key_len, value) + + Like :c:func:`json_object_foreach`, but in *key_len* stored length of the *key*. + Example:: + + /* obj is a JSON object */ + const char *key; + json_t *value; + size_t len; + + json_object_keylen_foreach(obj, key, len, value) { + printf("got key %s with length %zu\n", key, len); + } + + **Note:** It's not safe to call ``json_object_deln(object, key, key_len)`` + during iteration. If you need to, use + :func:`json_object_keylen_foreach_safe` instead. + + .. versionadded:: 2.14 + + +.. function:: void json_object_keylen_foreach_safe(object, tmp, key, key_len, value) + + Like :func:`json_object_keylen_foreach()`, but it's safe to call + ``json_object_deln(object, key, key_len)`` during iteration. 
+ You need to pass an extra ``void *`` parameter ``tmp`` that is used for temporary storage. + + .. versionadded:: 2.14 + +The following functions can be used to iterate through all key-value +pairs in an object. The items are returned in the order they were +inserted to the object. + +.. function:: void *json_object_iter(json_t *object) + + Returns an opaque iterator which can be used to iterate over all + key-value pairs in *object*, or *NULL* if *object* is empty. + +.. function:: void *json_object_iter_at(json_t *object, const char *key) + + Like :func:`json_object_iter()`, but returns an iterator to the + key-value pair in *object* whose key is equal to *key*, or NULL if + *key* is not found in *object*. Iterating forward to the end of + *object* only yields all key-value pairs of the object if *key* + happens to be the first key in the underlying hash table. + +.. function:: void *json_object_iter_next(json_t *object, void *iter) + + Returns an iterator pointing to the next key-value pair in *object* + after *iter*, or *NULL* if the whole object has been iterated + through. + +.. function:: const char *json_object_iter_key(void *iter) + + Extract the associated key from *iter*. + +.. function:: size_t json_object_iter_key_len(void *iter) + + Extract the associated key length from *iter*. + + .. versionadded:: 2.14 + +.. function:: json_t *json_object_iter_value(void *iter) + + .. refcounting:: borrow + + Extract the associated value from *iter*. + +.. function:: int json_object_iter_set(json_t *object, void *iter, json_t *value) + + Set the value of the key-value pair in *object*, that is pointed to + by *iter*, to *value*. + +.. function:: int json_object_iter_set_new(json_t *object, void *iter, json_t *value) + + Like :func:`json_object_iter_set()`, but steals the reference to + *value*. This is useful when *value* is newly created and not used + after the call. + +.. 
function:: void *json_object_key_to_iter(const char *key) + + Like :func:`json_object_iter_at()`, but much faster. Only works for + values returned by :func:`json_object_iter_key()`. Using other keys + will lead to segfaults. This function is used internally to + implement :func:`json_object_foreach`. Example:: + + /* obj is a JSON object */ + const char *key; + json_t *value; + + void *iter = json_object_iter(obj); + while(iter) + { + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + /* use key and value ... */ + iter = json_object_iter_next(obj, iter); + } + + .. versionadded:: 2.3 + +.. function:: void json_object_seed(size_t seed) + + Seed the hash function used in Jansson's hashtable implementation. + The seed is used to randomize the hash function so that an + attacker cannot control its output. + + If *seed* is 0, Jansson generates the seed itself by reading + random data from the operating system's entropy sources. If no + entropy sources are available, falls back to using a combination + of the current timestamp (with microsecond precision if possible) + and the process ID. + + If called at all, this function must be called before any calls to + :func:`json_object()`, either explicit or implicit. If this + function is not called by the user, the first call to + :func:`json_object()` (either explicit or implicit) seeds the hash + function. See :ref:`thread-safety` for notes on thread safety. + + If repeatable results are required, for e.g. unit tests, the hash + function can be "unrandomized" by calling :func:`json_object_seed` + with a constant value on program startup, e.g. + ``json_object_seed(1)``. + + .. versionadded:: 2.6 + + +Error reporting +=============== + +Jansson uses a single struct type to pass error information to the +user. See sections :ref:`apiref-decoding`, :ref:`apiref-pack` and +:ref:`apiref-unpack` for functions that pass error information using +this struct. + +.. type:: json_error_t + + .. 
member:: char text[] + + The error message (in UTF-8), or an empty string if a message is + not available. + + The last byte of this array contains a numeric error code. Use + :func:`json_error_code()` to extract this code. + + .. member:: char source[] + + Source of the error. This can be (a part of) the file name or a + special identifier in angle brackets (e.g. ````). + + .. member:: int line + + The line number on which the error occurred. + + .. member:: int column + + The column on which the error occurred. Note that this is the + *character column*, not the byte column, i.e. a multibyte UTF-8 + character counts as one column. + + .. member:: int position + + The position in bytes from the start of the input. This is + useful for debugging Unicode encoding problems. + +The normal use of :type:`json_error_t` is to allocate it on the stack, +and pass a pointer to a function. Example:: + + int main() { + json_t *json; + json_error_t error; + + json = json_load_file("/path/to/file.json", 0, &error); + if(!json) { + /* the error variable contains error information */ + } + ... + } + +Also note that if the call succeeded (``json != NULL`` in the above +example), the contents of ``error`` are generally left unspecified. +The decoding functions write to the ``position`` member also on +success. See :ref:`apiref-decoding` for more info. + +All functions also accept *NULL* as the :type:`json_error_t` pointer, +in which case no error information is returned to the caller. + +.. c:enum:: json_error_code + + An enumeration containing numeric error codes. The following errors are + currently defined: + + ``json_error_unknown`` + + Unknown error. This should only be returned for non-errorneous + :type:`json_error_t` structures. + + ``json_error_out_of_memory`` + + The library couldn’t allocate any heap memory. + + ``json_error_stack_overflow`` + + Nesting too deep. + + ``json_error_cannot_open_file`` + + Couldn’t open input file. 
+ + ``json_error_invalid_argument`` + + A function argument was invalid. + + ``json_error_invalid_utf8`` + + The input string isn’t valid UTF-8. + + ``json_error_premature_end_of_input`` + + The input ended in the middle of a JSON value. + + ``json_error_end_of_input_expected`` + + There was some text after the end of a JSON value. See the + ``JSON_DISABLE_EOF_CHECK`` flag. + + ``json_error_invalid_syntax`` + + JSON syntax error. + + ``json_error_invalid_format`` + + Invalid format string for packing or unpacking. + + ``json_error_wrong_type`` + + When packing or unpacking, the actual type of a value differed from the + one specified in the format string. + + ``json_error_null_character`` + + A null character was detected in a JSON string. See the + ``JSON_ALLOW_NUL`` flag. + + ``json_error_null_value`` + + When packing or unpacking, some key or value was ``NULL``. + + ``json_error_null_byte_in_key`` + + An object key would contain a null byte. Jansson can’t represent such + keys; see :ref:`rfc-conformance`. + + ``json_error_duplicate_key`` + + Duplicate key in object. See the ``JSON_REJECT_DUPLICATES`` flag. + + ``json_error_numeric_overflow`` + + When converting a JSON number to a C numeric type, a numeric overflow + was detected. + + ``json_error_item_not_found`` + + Key in object not found. + + ``json_error_index_out_of_range`` + + Array index is out of range. + + .. versionadded:: 2.11 + +.. function:: enum json_error_code json_error_code(const json_error_t *error) + + Returns the error code embedded in ``error->text``. + + .. versionadded:: 2.11 + + +Encoding +======== + +This section describes the functions that can be used to encode +values to JSON. By default, only objects and arrays can be encoded +directly, since they are the only valid *root* values of a JSON text. +To encode any JSON value, use the ``JSON_ENCODE_ANY`` flag (see +below). 
+ +By default, the output has no newlines, and spaces are used between +array and object elements for a readable output. This behavior can be +altered by using the ``JSON_INDENT`` and ``JSON_COMPACT`` flags +described below. A newline is never appended to the end of the encoded +JSON data. + +Each function takes a *flags* parameter that controls some aspects of +how the data is encoded. Its default value is 0. The following macros +can be ORed together to obtain *flags*. + +``JSON_INDENT(n)`` + Pretty-print the result, using newlines between array and object + items, and indenting with *n* spaces. The valid range for *n* is + between 0 and 31 (inclusive), other values result in an undefined + output. If ``JSON_INDENT`` is not used or *n* is 0, no newlines are + inserted between array and object items. + + The ``JSON_MAX_INDENT`` constant defines the maximum indentation + that can be used, and its value is 31. + + .. versionchanged:: 2.7 + Added ``JSON_MAX_INDENT``. + +``JSON_COMPACT`` + This flag enables a compact representation, i.e. sets the separator + between array and object items to ``","`` and between object keys + and values to ``":"``. Without this flag, the corresponding + separators are ``", "`` and ``": "`` for more readable output. + +``JSON_ENSURE_ASCII`` + If this flag is used, the output is guaranteed to consist only of + ASCII characters. This is achieved by escaping all Unicode + characters outside the ASCII range. + +``JSON_SORT_KEYS`` + If this flag is used, all the objects in output are sorted by key. + This is useful e.g. if two JSON texts are diffed or visually + compared. + +``JSON_PRESERVE_ORDER`` + **Deprecated since version 2.8:** Order of object keys + is always preserved. + + Prior to version 2.8: If this flag is used, object keys in the + output are sorted into the same order in which they were first + inserted to the object. For example, decoding a JSON text and then + encoding with this flag preserves the order of object keys. 
+ +``JSON_ENCODE_ANY`` + Specifying this flag makes it possible to encode any JSON value on + its own. Without it, only objects and arrays can be passed as the + *json* value to the encoding functions. + + **Note:** Encoding any value may be useful in some scenarios, but + it's generally discouraged as it violates strict compatibility with + :rfc:`4627`. If you use this flag, don't expect interoperability + with other JSON systems. + + .. versionadded:: 2.1 + +``JSON_ESCAPE_SLASH`` + Escape the ``/`` characters in strings with ``\/``. + + .. versionadded:: 2.4 + +``JSON_REAL_PRECISION(n)`` + Output all real numbers with at most *n* digits of precision. The + valid range for *n* is between 0 and 31 (inclusive), and other + values result in an undefined behavior. + + By default, the precision is 17, to correctly and losslessly encode + all IEEE 754 double precision floating point numbers. + + .. versionadded:: 2.7 + +``JSON_EMBED`` + If this flag is used, the opening and closing characters of the top-level + array ('[', ']') or object ('{', '}') are omitted during encoding. This + flag is useful when concatenating multiple arrays or objects into a stream. + + .. versionadded:: 2.10 + +These functions output UTF-8: + +.. function:: char *json_dumps(const json_t *json, size_t flags) + + Returns the JSON representation of *json* as a string, or *NULL* on + error. *flags* is described above. The return value must be freed + by the caller using :func:`free()`. Note that if you have called + :func:`json_set_alloc_funcs()` to override :func:`free()`, you should + call your custom free function instead to free the return value. + +.. function:: size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags) + + Writes the JSON representation of *json* to the *buffer* of + *size* bytes. Returns the number of bytes that would be written + or 0 on error. *flags* is described above. *buffer* is not + null-terminated. 
+ + This function never writes more than *size* bytes. If the return + value is greater than *size*, the contents of the *buffer* are + undefined. This behavior enables you to specify a NULL *buffer* + to determine the length of the encoding. For example:: + + size_t size = json_dumpb(json, NULL, 0, 0); + if (size == 0) + return -1; + + char *buf = alloca(size); + + size = json_dumpb(json, buf, size, 0); + + .. versionadded:: 2.10 + +.. function:: int json_dumpf(const json_t *json, FILE *output, size_t flags) + + Write the JSON representation of *json* to the stream *output*. + *flags* is described above. Returns 0 on success and -1 on error. + If an error occurs, something may have already been written to + *output*. In this case, the output is undefined and most likely not + valid JSON. + +.. function:: int json_dumpfd(const json_t *json, int output, size_t flags) + + Write the JSON representation of *json* to the stream *output*. + *flags* is described above. Returns 0 on success and -1 on error. + If an error occurs, something may have already been written to + *output*. In this case, the output is undefined and most likely not + valid JSON. + + It is important to note that this function can only succeed on stream + file descriptors (such as SOCK_STREAM). Using this function on a + non-stream file descriptor will result in undefined behavior. For + non-stream file descriptors, see instead :func:`json_dumpb()`. + + This function requires POSIX and fails on all non-POSIX systems. + + .. versionadded:: 2.10 + +.. function:: int json_dump_file(const json_t *json, const char *path, size_t flags) + + Write the JSON representation of *json* to the file *path*. If + *path* already exists, it is overwritten. *flags* is described + above. Returns 0 on success and -1 on error. + +.. 
type:: json_dump_callback_t + + A typedef for a function that's called by + :func:`json_dump_callback()`:: + + typedef int (*json_dump_callback_t)(const char *buffer, size_t size, void *data); + + *buffer* points to a buffer containing a chunk of output, *size* is + the length of the buffer, and *data* is the corresponding + :func:`json_dump_callback()` argument passed through. + + *buffer* is guaranteed to be a valid UTF-8 string (i.e. multi-byte + code unit sequences are preserved). *buffer* never contains + embedded null bytes. + + On error, the function should return -1 to stop the encoding + process. On success, it should return 0. + + .. versionadded:: 2.2 + +.. function:: int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, size_t flags) + + Call *callback* repeatedly, passing a chunk of the JSON + representation of *json* each time. *flags* is described above. + Returns 0 on success and -1 on error. + + .. versionadded:: 2.2 + + +.. _apiref-decoding: + +Decoding +======== + +This section describes the functions that can be used to decode JSON +text to the Jansson representation of JSON data. The JSON +specification requires that a JSON text is either a serialized array +or object, and this requirement is also enforced with the following +functions. In other words, the top level value in the JSON text being +decoded must be either array or object. To decode any JSON value, use +the ``JSON_DECODE_ANY`` flag (see below). + +See :ref:`rfc-conformance` for a discussion on Jansson's conformance +to the JSON specification. It explains many design decisions that +affect especially the behavior of the decoder. + +Each function takes a *flags* parameter that can be used to control +the behavior of the decoder. Its default value is 0. The following +macros can be ORed together to obtain *flags*. + +``JSON_REJECT_DUPLICATES`` + Issue a decoding error if any JSON object in the input text + contains duplicate keys. 
Without this flag, the value of the last + occurrence of each key ends up in the result. Key equivalence is + checked byte-by-byte, without special Unicode comparison + algorithms. + + .. versionadded:: 2.1 + +``JSON_DECODE_ANY`` + By default, the decoder expects an array or object as the input. + With this flag enabled, the decoder accepts any valid JSON value. + + **Note:** Decoding any value may be useful in some scenarios, but + it's generally discouraged as it violates strict compatibility with + :rfc:`4627`. If you use this flag, don't expect interoperability + with other JSON systems. + + .. versionadded:: 2.3 + +``JSON_DISABLE_EOF_CHECK`` + By default, the decoder expects that its whole input constitutes a + valid JSON text, and issues an error if there's extra data after + the otherwise valid JSON input. With this flag enabled, the decoder + stops after decoding a valid JSON array or object, and thus allows + extra data after the JSON text. + + Normally, reading will stop when the last ``]`` or ``}`` in the + JSON input is encountered. If both ``JSON_DISABLE_EOF_CHECK`` and + ``JSON_DECODE_ANY`` flags are used, the decoder may read one extra + UTF-8 code unit (up to 4 bytes of input). For example, decoding + ``4true`` correctly decodes the integer 4, but also reads the + ``t``. For this reason, if reading multiple consecutive values that + are not arrays or objects, they should be separated by at least one + whitespace character. + + .. versionadded:: 2.1 + +``JSON_DECODE_INT_AS_REAL`` + JSON defines only one number type. Jansson distinguishes between + ints and reals. For more information see :ref:`real-vs-integer`. + With this flag enabled the decoder interprets all numbers as real + values. Integers that do not have an exact double representation + will silently result in a loss of precision. Integers that cause + a double overflow will cause an error. + + .. versionadded:: 2.5 + +``JSON_ALLOW_NUL`` + Allow ``\u0000`` escape inside string values. 
This is a safety + measure; If you know your input can contain null bytes, use this + flag. If you don't use this flag, you don't have to worry about null + bytes inside strings unless you explicitly create themselves by + using e.g. :func:`json_stringn()` or ``s#`` format specifier for + :func:`json_pack()`. + + Object keys cannot have embedded null bytes even if this flag is + used. + + .. versionadded:: 2.6 + +Each function also takes an optional :type:`json_error_t` parameter +that is filled with error information if decoding fails. It's also +updated on success; the number of bytes of input read is written to +its ``position`` field. This is especially useful when using +``JSON_DISABLE_EOF_CHECK`` to read multiple consecutive JSON texts. + +.. versionadded:: 2.3 + Number of bytes of input read is written to the ``position`` field + of the :type:`json_error_t` structure. + +If no error or position information is needed, you can pass *NULL*. + +.. function:: json_t *json_loads(const char *input, size_t flags, json_error_t *error) + + .. refcounting:: new + + Decodes the JSON string *input* and returns the array or object it + contains, or *NULL* on error, in which case *error* is filled with + information about the error. *flags* is described above. + +.. function:: json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) + + .. refcounting:: new + + Decodes the JSON string *buffer*, whose length is *buflen*, and + returns the array or object it contains, or *NULL* on error, in + which case *error* is filled with information about the error. This + is similar to :func:`json_loads()` except that the string doesn't + need to be null-terminated. *flags* is described above. + + .. versionadded:: 2.1 + +.. function:: json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) + + .. 
refcounting:: new + + Decodes the JSON text in stream *input* and returns the array or + object it contains, or *NULL* on error, in which case *error* is + filled with information about the error. *flags* is described + above. + + This function will start reading the input from whatever position + the input file was in, without attempting to seek first. If an error + occurs, the file position will be left indeterminate. On success, + the file position will be at EOF, unless ``JSON_DISABLE_EOF_CHECK`` + flag was used. In this case, the file position will be at the first + character after the last ``]`` or ``}`` in the JSON input. This + allows calling :func:`json_loadf()` on the same ``FILE`` object + multiple times, if the input consists of consecutive JSON texts, + possibly separated by whitespace. + +.. function:: json_t *json_loadfd(int input, size_t flags, json_error_t *error) + + .. refcounting:: new + + Decodes the JSON text in stream *input* and returns the array or + object it contains, or *NULL* on error, in which case *error* is + filled with information about the error. *flags* is described + above. + + This function will start reading the input from whatever position + the input file descriptor was in, without attempting to seek first. + If an error occurs, the file position will be left indeterminate. + On success, the file position will be at EOF, unless + ``JSON_DISABLE_EOF_CHECK`` flag was used. In this case, the file + descriptor's position will be at the first character after the last + ``]`` or ``}`` in the JSON input. This allows calling + :func:`json_loadfd()` on the same file descriptor multiple times, + if the input consists of consecutive JSON texts, possibly separated + by whitespace. + + It is important to note that this function can only succeed on stream + file descriptors (such as SOCK_STREAM). Using this function on a + non-stream file descriptor will result in undefined behavior. 
For + non-stream file descriptors, see instead :func:`json_loadb()`. In + addition, please note that this function cannot be used on non-blocking + file descriptors (such as a non-blocking socket). Using this function + on non-blocking file descriptors has a high risk of data loss because + it does not support resuming. + + This function requires POSIX and fails on all non-POSIX systems. + + .. versionadded:: 2.10 + +.. function:: json_t *json_load_file(const char *path, size_t flags, json_error_t *error) + + .. refcounting:: new + + Decodes the JSON text in file *path* and returns the array or + object it contains, or *NULL* on error, in which case *error* is + filled with information about the error. *flags* is described + above. + +.. type:: json_load_callback_t + + A typedef for a function that's called by + :func:`json_load_callback()` to read a chunk of input data:: + + typedef size_t (*json_load_callback_t)(void *buffer, size_t buflen, void *data); + + *buffer* points to a buffer of *buflen* bytes, and *data* is the + corresponding :func:`json_load_callback()` argument passed through. + + On success, the function should write at most *buflen* bytes to + *buffer*, and return the number of bytes written; a returned value + of 0 indicates that no data was produced and that the end of file + has been reached. On error, the function should return + ``(size_t)-1`` to abort the decoding process. + + In UTF-8, some code points are encoded as multi-byte sequences. The + callback function doesn't need to worry about this, as Jansson + handles it at a higher level. For example, you can safely read a + fixed number of bytes from a network connection without having to + care about code unit sequences broken apart by the chunk + boundaries. + + .. versionadded:: 2.4 + +.. function:: json_t *json_load_callback(json_load_callback_t callback, void *data, size_t flags, json_error_t *error) + + .. 
refcounting:: new + + Decodes the JSON text produced by repeated calls to *callback*, and + returns the array or object it contains, or *NULL* on error, in + which case *error* is filled with information about the error. + *data* is passed through to *callback* on each call. *flags* is + described above. + + .. versionadded:: 2.4 + + +.. _apiref-pack: + +Building Values +=============== + +This section describes functions that help to create, or *pack*, +complex JSON values, especially nested objects and arrays. Value +building is based on a *format string* that is used to tell the +functions about the expected arguments. + +For example, the format string ``"i"`` specifies a single integer +value, while the format string ``"[ssb]"`` or the equivalent ``"[s, s, +b]"`` specifies an array value with two strings and a boolean as its +items:: + + /* Create the JSON integer 42 */ + json_pack("i", 42); + + /* Create the JSON array ["foo", "bar", true] */ + json_pack("[ssb]", "foo", "bar", 1); + +Here's the full list of format specifiers. The type in parentheses +denotes the resulting JSON type, and the type in brackets (if any) +denotes the C type that is expected as the corresponding argument or +arguments. + +``s`` (string) [const char \*] + Convert a null terminated UTF-8 string to a JSON string. + +``s?`` (string) [const char \*] + Like ``s``, but if the argument is *NULL*, output a JSON null + value. + + .. versionadded:: 2.8 + +``s*`` (string) [const char \*] + Like ``s``, but if the argument is *NULL*, do not output any value. + This format can only be used inside an object or an array. If used + inside an object, the corresponding key is additionally suppressed + when the value is omitted. See below for an example. + + .. versionadded:: 2.11 + +``s#`` (string) [const char \*, int] + Convert a UTF-8 buffer of a given length to a JSON string. + + .. 
versionadded:: 2.5 + +``s%`` (string) [const char \*, size_t] + Like ``s#`` but the length argument is of type :type:`size_t`. + + .. versionadded:: 2.6 + +``+`` [const char \*] + Like ``s``, but concatenate to the previous string. Only valid + after ``s``, ``s#``, ``+`` or ``+#``. + + .. versionadded:: 2.5 + +``+#`` [const char \*, int] + Like ``s#``, but concatenate to the previous string. Only valid + after ``s``, ``s#``, ``+`` or ``+#``. + + .. versionadded:: 2.5 + +``+%`` (string) [const char \*, size_t] + Like ``+#`` but the length argument is of type :type:`size_t`. + + .. versionadded:: 2.6 + +``n`` (null) + Output a JSON null value. No argument is consumed. + +``b`` (boolean) [int] + Convert a C ``int`` to JSON boolean value. Zero is converted + to ``false`` and non-zero to ``true``. + +``i`` (integer) [int] + Convert a C ``int`` to JSON integer. + +``I`` (integer) [json_int_t] + Convert a C :type:`json_int_t` to JSON integer. + +``f`` (real) [double] + Convert a C ``double`` to JSON real. + +``o`` (any value) [json_t \*] + Output any given JSON value as-is. If the value is added to an + array or object, the reference to the value passed to ``o`` is + stolen by the container. + +``O`` (any value) [json_t \*] + Like ``o``, but the argument's reference count is incremented. + This is useful if you pack into an array or object and want to + keep the reference for the JSON value consumed by ``O`` to + yourself. + +``o?``, ``O?`` (any value) [json_t \*] + Like ``o`` and ``O``, respectively, but if the argument is + *NULL*, output a JSON null value. + + .. versionadded:: 2.8 + +``o*``, ``O*`` (any value) [json_t \*] + Like ``o`` and ``O``, respectively, but if the argument is + *NULL*, do not output any value. This format can only be used + inside an object or an array. If used inside an object, the + corresponding key is additionally suppressed. See below for an + example. + + .. 
versionadded:: 2.11 + +``[fmt]`` (array) + Build an array with contents from the inner format string. ``fmt`` + may contain objects and arrays, i.e. recursive value building is + supported. + +``{fmt}`` (object) + Build an object with contents from the inner format string + ``fmt``. The first, third, etc. format specifier represent a key, + and must be a string (see ``s``, ``s#``, ``+`` and ``+#`` above), + as object keys are always strings. The second, fourth, etc. format + specifier represent a value. Any value may be an object or array, + i.e. recursive value building is supported. + +Whitespace, ``:`` and ``,`` are ignored. + +.. function:: json_t *json_pack(const char *fmt, ...) + + .. refcounting:: new + + Build a new JSON value according to the format string *fmt*. For + each format specifier (except for ``{}[]n``), one or more arguments + are consumed and used to build the corresponding value. Returns + *NULL* on error. + +.. function:: json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) + json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) + + .. refcounting:: new + + Like :func:`json_pack()`, but an in the case of an error, an error + message is written to *error*, if it's not *NULL*. The *flags* + parameter is currently unused and should be set to 0. + + As only the errors in format string (and out-of-memory errors) can + be caught by the packer, these two functions are most likely only + useful for debugging format strings. 
+ +More examples:: + + /* Build an empty JSON object */ + json_pack("{}"); + + /* Build the JSON object {"foo": 42, "bar": 7} */ + json_pack("{sisi}", "foo", 42, "bar", 7); + + /* Like above, ':', ',' and whitespace are ignored */ + json_pack("{s:i, s:i}", "foo", 42, "bar", 7); + + /* Build the JSON array [[1, 2], {"cool": true}] */ + json_pack("[[i,i],{s:b}]", 1, 2, "cool", 1); + + /* Build a string from a non-null terminated buffer */ + char buffer[4] = {'t', 'e', 's', 't'}; + json_pack("s#", buffer, 4); + + /* Concatenate strings together to build the JSON string "foobarbaz" */ + json_pack("s++", "foo", "bar", "baz"); + + /* Create an empty object or array when optional members are missing */ + json_pack("{s:s*,s:o*,s:O*}", "foo", NULL, "bar", NULL, "baz", NULL); + json_pack("[s*,o*,O*]", NULL, NULL, NULL); + + +.. _apiref-unpack: + +Parsing and Validating Values +============================= + +This section describes functions that help to validate complex values +and extract, or *unpack*, data from them. Like :ref:`building values +`, this is also based on format strings. + +While a JSON value is unpacked, the type specified in the format +string is checked to match that of the JSON value. This is the +validation part of the process. In addition to this, the unpacking +functions can also check that all items of arrays and objects are +unpacked. This check be enabled with the format specifier ``!`` or by +using the flag ``JSON_STRICT``. See below for details. + +Here's the full list of format specifiers. The type in parentheses +denotes the JSON type, and the type in brackets (if any) denotes the C +type whose address should be passed. + +``s`` (string) [const char \*] + Convert a JSON string to a pointer to a null terminated UTF-8 + string. The resulting string is extracted by using + :func:`json_string_value()` internally, so it exists as long as + there are still references to the corresponding JSON string. 
+ +``s%`` (string) [const char \*, size_t \*] + Convert a JSON string to a pointer to a null terminated UTF-8 + string and its length. + + .. versionadded:: 2.6 + +``n`` (null) + Expect a JSON null value. Nothing is extracted. + +``b`` (boolean) [int] + Convert a JSON boolean value to a C ``int``, so that ``true`` + is converted to 1 and ``false`` to 0. + +``i`` (integer) [int] + Convert a JSON integer to C ``int``. + +``I`` (integer) [json_int_t] + Convert a JSON integer to C :type:`json_int_t`. + +``f`` (real) [double] + Convert a JSON real to C ``double``. + +``F`` (integer or real) [double] + Convert a JSON number (integer or real) to C ``double``. + +``o`` (any value) [json_t \*] + Store a JSON value with no conversion to a :type:`json_t` pointer. + +``O`` (any value) [json_t \*] + Like ``o``, but the JSON value's reference count is incremented. + Storage pointers should be initialized NULL before using unpack. + The caller is responsible for releasing all references incremented + by unpack, even when an error occurs. + +``[fmt]`` (array) + Convert each item in the JSON array according to the inner format + string. ``fmt`` may contain objects and arrays, i.e. recursive + value extraction is supported. + +``{fmt}`` (object) + Convert each item in the JSON object according to the inner format + string ``fmt``. The first, third, etc. format specifier represent + a key, and must be ``s``. The corresponding argument to unpack + functions is read as the object key. The second, fourth, etc. + format specifier represent a value and is written to the address + given as the corresponding argument. **Note** that every other + argument is read from and every other is written to. + + ``fmt`` may contain objects and arrays as values, i.e. recursive + value extraction is supported. + + .. versionadded:: 2.3 + Any ``s`` representing a key may be suffixed with a ``?`` to + make the key optional. If the key is not found, nothing is + extracted. See below for an example. 
+ +``!`` + This special format specifier is used to enable the check that + all object and array items are accessed, on a per-value basis. It + must appear inside an array or object as the last format specifier + before the closing bracket or brace. To enable the check globally, + use the ``JSON_STRICT`` unpacking flag. + +``*`` + This special format specifier is the opposite of ``!``. If the + ``JSON_STRICT`` flag is used, ``*`` can be used to disable the + strict check on a per-value basis. It must appear inside an array + or object as the last format specifier before the closing bracket + or brace. + +Whitespace, ``:`` and ``,`` are ignored. + +.. function:: int json_unpack(json_t *root, const char *fmt, ...) + + Validate and unpack the JSON value *root* according to the format + string *fmt*. Returns 0 on success and -1 on failure. + +.. function:: int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...) + int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, va_list ap) + + Validate and unpack the JSON value *root* according to the format + string *fmt*. If an error occurs and *error* is not *NULL*, write + error information to *error*. *flags* can be used to control the + behaviour of the unpacker, see below for the flags. Returns 0 on + success and -1 on failure. + +.. note:: + + The first argument of all unpack functions is ``json_t *root`` + instead of ``const json_t *root``, because the use of ``O`` format + specifier causes the reference count of ``root``, or some value + reachable from ``root``, to be increased. Furthermore, the ``o`` + format specifier may be used to extract a value as-is, which allows + modifying the structure or contents of a value reachable from + ``root``. + + If the ``O`` and ``o`` format specifiers are not used, it's + perfectly safe to cast a ``const json_t *`` variable to plain + ``json_t *`` when used with these functions. 
+ +The following unpacking flags are available: + +``JSON_STRICT`` + Enable the extra validation step checking that all object and + array items are unpacked. This is equivalent to appending the + format specifier ``!`` to the end of every array and object in the + format string. + +``JSON_VALIDATE_ONLY`` + Don't extract any data, just validate the JSON value against the + given format string. Note that object keys must still be specified + after the format string. + +Examples:: + + /* root is the JSON integer 42 */ + int myint; + json_unpack(root, "i", &myint); + assert(myint == 42); + + /* root is the JSON object {"foo": "bar", "quux": true} */ + const char *str; + int boolean; + json_unpack(root, "{s:s, s:b}", "foo", &str, "quux", &boolean); + assert(strcmp(str, "bar") == 0 && boolean == 1); + + /* root is the JSON array [[1, 2], {"baz": null} */ + json_error_t error; + json_unpack_ex(root, &error, JSON_VALIDATE_ONLY, "[[i,i], {s:n}]", "baz"); + /* returns 0 for validation success, nothing is extracted */ + + /* root is the JSON array [1, 2, 3, 4, 5] */ + int myint1, myint2; + json_unpack(root, "[ii!]", &myint1, &myint2); + /* returns -1 for failed validation */ + + /* root is an empty JSON object */ + int myint = 0, myint2 = 0, myint3 = 0; + json_unpack(root, "{s?i, s?[ii]}", + "foo", &myint1, + "bar", &myint2, &myint3); + /* myint1, myint2 or myint3 is no touched as "foo" and "bar" don't exist */ + + +Equality +======== + +Testing for equality of two JSON values cannot, in general, be +achieved using the ``==`` operator. Equality in the terms of the +``==`` operator states that the two :type:`json_t` pointers point to +exactly the same JSON value. However, two JSON values can be equal not +only if they are exactly the same value, but also if they have equal +"contents": + +* Two integer or real values are equal if their contained numeric + values are equal. An integer value is never equal to a real value, + though. 
+ +* Two strings are equal if their contained UTF-8 strings are equal, + byte by byte. Unicode comparison algorithms are not implemented. + +* Two arrays are equal if they have the same number of elements and + each element in the first array is equal to the corresponding + element in the second array. + +* Two objects are equal if they have exactly the same keys and the + value for each key in the first object is equal to the value of the + corresponding key in the second object. + +* Two true, false or null values have no "contents", so they are equal + if their types are equal. (Because these values are singletons, + their equality can actually be tested with ``==``.) + +.. function:: int json_equal(json_t *value1, json_t *value2) + + Returns 1 if *value1* and *value2* are equal, as defined above. + Returns 0 if they are unequal or one or both of the pointers are + *NULL*. + + +Copying +======= + +Because of reference counting, passing JSON values around doesn't +require copying them. But sometimes a fresh copy of a JSON value is +needed. For example, if you need to modify an array, but still want to +use the original afterwards, you should take a copy of it first. + +Jansson supports two kinds of copying: shallow and deep. There is a +difference between these methods only for arrays and objects. Shallow +copying only copies the first level value (array or object) and uses +the same child values in the copied value. Deep copying makes a fresh +copy of the child values, too. Moreover, all the child values are deep +copied in a recursive fashion. + +Copying objects preserves the insertion order of keys. + +.. function:: json_t *json_copy(json_t *value) + + .. refcounting:: new + + Returns a shallow copy of *value*, or *NULL* on error. + +.. function:: json_t *json_deep_copy(const json_t *value) + + .. refcounting:: new + + Returns a deep copy of *value*, or *NULL* on error. + + +.. 
_apiref-custom-memory-allocation: + +Custom Memory Allocation +======================== + +By default, Jansson uses :func:`malloc()` and :func:`free()` for +memory allocation. These functions can be overridden if custom +behavior is needed. + +.. type:: json_malloc_t + + A typedef for a function pointer with :func:`malloc()`'s + signature:: + + typedef void *(*json_malloc_t)(size_t); + +.. type:: json_free_t + + A typedef for a function pointer with :func:`free()`'s + signature:: + + typedef void (*json_free_t)(void *); + +.. function:: void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn) + + Use *malloc_fn* instead of :func:`malloc()` and *free_fn* instead + of :func:`free()`. This function has to be called before any other + Jansson's API functions to ensure that all memory operations use + the same functions. + +.. function:: void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn) + + Fetch the current malloc_fn and free_fn used. Either parameter + may be NULL. + + .. versionadded:: 2.8 + +**Examples:** + +Circumvent problems with different CRT heaps on Windows by using +application's :func:`malloc()` and :func:`free()`:: + + json_set_alloc_funcs(malloc, free); + +Use the `Boehm's conservative garbage collector`_ for memory +operations:: + + json_set_alloc_funcs(GC_malloc, GC_free); + +.. _Boehm's conservative garbage collector: http://www.hboehm.info/gc/ + +Allow storing sensitive data (e.g. passwords or encryption keys) in +JSON structures by zeroing all memory when freed:: + + static void *secure_malloc(size_t size) + { + /* Store the memory area size in the beginning of the block */ + void *ptr = malloc(size + 8); + *((size_t *)ptr) = size; + return ptr + 8; + } + + static void secure_free(void *ptr) + { + size_t size; + + ptr -= 8; + size = *((size_t *)ptr); + + guaranteed_memset(ptr, 0, size + 8); + free(ptr); + } + + int main() + { + json_set_alloc_funcs(secure_malloc, secure_free); + /* ... 
*/ + } + +For more information about the issues of storing sensitive data in +memory, see +http://www.dwheeler.com/secure-programs/Secure-Programs-HOWTO/protect-secrets.html. +The page also explains the :func:`guaranteed_memset()` function used +in the example and gives a sample implementation for it. + +.. _fixed_length_keys: + +Fixed-Length keys +================= + +The Jansson API allows work with fixed-length keys. This can be useful in the following cases: + +* The key is contained inside a buffer and is not null-terminated. In this case creating a new temporary buffer is not needed. +* The key contains U+0000 inside it. + +List of API for fixed-length keys: + +* :c:func:`json_object_getn` +* :c:func:`json_object_setn` +* :c:func:`json_object_setn_nocheck` +* :c:func:`json_object_setn_new` +* :c:func:`json_object_setn_new_nocheck` +* :c:func:`json_object_deln` +* :c:func:`json_object_iter_key_len` +* :c:func:`json_object_keylen_foreach` +* :c:func:`json_object_keylen_foreach_safe` + +**Examples:** + +Try to write a new function to get :c:struct:`json_t` by path separated by ``.`` + +This requires: + +* string iterator (no need to modify the input for better performance) +* API for working with fixed-size keys + +The iterator:: + + struct string { + const char *string; + size_t length; + }; + + size_t string_try_next(struct string *str, const char *delimiter) { + str->string += strspn(str->string, delimiter); + str->length = strcspn(str->string, delimiter); + return str->length; + } + + #define string_foreach(_string, _delimiter) \ + for (; string_try_next(&(_string), _delimiter); (_string).string += (_string).length) + + +The function:: + + json_t *json_object_get_by_path(json_t *object, const char *path) { + struct string str; + json_t *out = object; + + str.string = path; + + string_foreach(str, ".") { + out = json_object_getn(out, str.string, str.length); + if (out == NULL) + return NULL; + } + + return out; + } + +And usage:: + + int main(void) { + json_t 
*obj = json_pack("{s:{s:{s:b}}}", "a", "b", "c", 1); + + json_t *c = json_object_get_by_path(obj, "a.b.c"); + assert(json_is_true(c)); + + json_decref(obj); + } diff --git a/solo-ckpool-source/src/jansson-2.14/doc/changes.rst b/solo-ckpool-source/src/jansson-2.14/doc/changes.rst new file mode 100644 index 0000000..ea56843 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/changes.rst @@ -0,0 +1,5 @@ +****************** +Changes in Jansson +****************** + +.. include:: ../CHANGES diff --git a/solo-ckpool-source/src/jansson-2.14/doc/conf.py b/solo-ckpool-source/src/jansson-2.14/doc/conf.py new file mode 100644 index 0000000..2426171 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/conf.py @@ -0,0 +1,217 @@ +# -*- coding: utf-8 -*- +# +# Jansson documentation build configuration file, created by +# sphinx-quickstart on Sun Sep 5 21:47:20 2010. +# +# This file is execfile()d with the current directory set to its containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys, os + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath('ext')) + +# -- General configuration ----------------------------------------------------- + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be extensions +# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. +extensions = ['refcounting'] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. 
+source_suffix = '.rst' + +# The encoding of source files. +#source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Jansson' +copyright = u'2009-2020, Petri Lehtinen' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = '2.14' +# The full version, including alpha/beta/rc tags. +release = version + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +#language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +#today = '' +# Else, today_fmt is used as the format for a strftime call. +#today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ['_build'] + +# The reST default role (used for this markup: `text`) to use for all documents. +default_role = 'c:func' +primary_domain = 'c' + +# If true, '()' will be appended to :func: etc. cross-reference text. +#add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +#add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +#show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# A list of ignored prefixes for module index sorting. +#modindex_common_prefix = [] + + +# -- Options for HTML output --------------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. 
+#html_theme = 'default' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +#html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +#html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +#html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +#html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +#html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +#html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +#html_static_path = ['_static'] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +#html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +#html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +#html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +#html_additional_pages = {} + +# If false, no module index is generated. +#html_domain_indices = True + +# If false, no index is generated. +#html_use_index = True + +# If true, the index is split into individual pages for each letter. +#html_split_index = False + +# If true, links to the reST sources are added to the pages. 
+#html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +#html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +#html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +#html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +#html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'Janssondoc' + + +# -- Options for LaTeX output -------------------------------------------------- + +# The paper size ('letter' or 'a4'). +#latex_paper_size = 'letter' + +# The font size ('10pt', '11pt' or '12pt'). +#latex_font_size = '10pt' + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, author, documentclass [howto/manual]). +latex_documents = [ + ('index', 'Jansson.tex', u'Jansson Documentation', + u'Petri Lehtinen', 'manual'), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +#latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +#latex_use_parts = False + +# If true, show page references after internal links. +#latex_show_pagerefs = False + +# If true, show URL addresses after external links. +#latex_show_urls = False + +# Additional stuff for the LaTeX preamble. +#latex_preamble = '' + +# Documents to append as an appendix to all manuals. +#latex_appendices = [] + +# If false, no module index is generated. +#latex_domain_indices = True + + +# -- Options for manual page output -------------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). 
+man_pages = [ + ('index', 'jansson', u'Jansson Documentation', + [u'Petri Lehtinen'], 1) +] diff --git a/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst b/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst new file mode 100644 index 0000000..5556a6b --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst @@ -0,0 +1,119 @@ +.. _rfc-conformance: + +*************** +RFC Conformance +*************** + +JSON is specified in :rfc:`4627`, *"The application/json Media Type +for JavaScript Object Notation (JSON)"*. + +Character Encoding +================== + +Jansson only supports UTF-8 encoded JSON texts. It does not support or +auto-detect any of the other encodings mentioned in the RFC, namely +UTF-16LE, UTF-16BE, UTF-32LE or UTF-32BE. Pure ASCII is supported, as +it's a subset of UTF-8. + +Strings +======= + +JSON strings are mapped to C-style null-terminated character arrays, +and UTF-8 encoding is used internally. + +All Unicode codepoints U+0000 through U+10FFFF are allowed in string +values. However, U+0000 is allowed in object keys only for length-aware functions. + +Unicode normalization or any other transformation is never performed +on any strings (string values or object keys). When checking for +equivalence of strings or object keys, the comparison is performed +byte by byte between the original UTF-8 representations of the +strings. + +Numbers +======= + +.. _real-vs-integer: + +Real vs. Integer +---------------- + +JSON makes no distinction between real and integer numbers; Jansson +does. Real numbers are mapped to the ``double`` type and integers to +the ``json_int_t`` type, which is a typedef of ``long long`` or +``long``, depending on whether ``long long`` is supported by your +compiler or not. 
+ +A JSON number is considered to be a real number if its lexical +representation includes one of ``e``, ``E``, or ``.``; regardless if +its actual numeric value is a true integer (e.g., all of ``1E6``, +``3.0``, ``400E-2``, and ``3.14E3`` are mathematical integers, but +will be treated as real values). With the ``JSON_DECODE_INT_AS_REAL`` +decoder flag set all numbers are interpreted as real. + +All other JSON numbers are considered integers. + +When encoding to JSON, real values are always represented +with a fractional part; e.g., the ``double`` value 3.0 will be +represented in JSON as ``3.0``, not ``3``. + +Overflow, Underflow & Precision +------------------------------- + +Real numbers whose absolute values are too small to be represented in +a C ``double`` will be silently estimated with 0.0. Thus, depending on +platform, JSON numbers very close to zero such as 1E-999 may result in +0.0. + +Real numbers whose absolute values are too large to be represented in +a C ``double`` will result in an overflow error (a JSON decoding +error). Thus, depending on platform, JSON numbers like 1E+999 or +-1E+999 may result in a parsing error. + +Likewise, integer numbers whose absolute values are too large to be +represented in the ``json_int_t`` type (see above) will result in an +overflow error (a JSON decoding error). Thus, depending on platform, +JSON numbers like 1000000000000000 may result in parsing error. + +Parsing JSON real numbers may result in a loss of precision. As long +as overflow does not occur (i.e. a total loss of precision), the +rounded approximate value is silently used. Thus the JSON number +1.000000000000000005 may, depending on platform, result in the +``double`` value 1.0. + +Signed zeros +------------ + +JSON makes no statement about what a number means; however Javascript +(ECMAscript) does state that +0.0 and -0.0 must be treated as being +distinct values, i.e. -0.0 |not-equal| 0.0. 
Jansson relies on the +underlying floating point library in the C environment in which it is +compiled. Therefore it is platform-dependent whether 0.0 and -0.0 will +be distinct values. Most platforms that use the IEEE 754 +floating-point standard will support signed zeros. + +Note that this only applies to floating-point; neither JSON, C, or +IEEE support the concept of signed integer zeros. + +.. |not-equal| unicode:: U+2260 + +Types +----- + +No support is provided in Jansson for any C numeric types other than +``json_int_t`` and ``double``. This excludes things such as unsigned +types, ``long double``, etc. Obviously, shorter types like ``short``, +``int``, ``long`` (if ``json_int_t`` is ``long long``) and ``float`` +are implicitly handled via the ordinary C type coercion rules (subject +to overflow semantics). Also, no support or hooks are provided for any +supplemental "bignum" type add-on packages. + +Depth of nested values +====================== + +To avoid stack exhaustion, Jansson currently limits the nesting depth +for arrays and objects to a certain value (default: 2048), defined as +a macro ``JSON_PARSER_MAX_DEPTH`` within ``jansson_config.h``. + +The limit is allowed to be set by the RFC; there is no recommended value +or required minimum depth to be supported. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py b/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py new file mode 100644 index 0000000..e72c481 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py @@ -0,0 +1,69 @@ +""" + refcounting + ~~~~~~~~~~~ + + Reference count annotations for C API functions. Has the same + result as the sphinx.ext.refcounting extension but works for all + functions regardless of the signature, and the reference counting + information is written inline with the documentation instead of a + separate file. + + Adds a new directive "refcounting". 
The directive has no content + and one required positional parameter:: "new" or "borrow". + + Example: + + .. cfunction:: json_t *json_object(void) + + .. refcounting:: new + + + + :copyright: Copyright (c) 2009-2016 Petri Lehtinen + :license: MIT, see LICENSE for details. +""" + +from docutils import nodes +from docutils.parsers.rst import Directive + + +def visit(self, node): + self.visit_emphasis(node) + +def depart(self, node): + self.depart_emphasis(node) + +def html_visit(self, node): + self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) + +def html_depart(self, node): + self.body.append('') + + +class refcounting(nodes.emphasis): + pass + +class refcounting_directive(Directive): + has_content = False + required_arguments = 1 + optional_arguments = 0 + final_argument_whitespace = False + + def run(self): + if self.arguments[0] == 'borrow': + text = 'Return value: Borrowed reference.' + elif self.arguments[0] == 'new': + text = 'Return value: New reference.' + else: + raise Error('Valid arguments: new, borrow') + + return [refcounting(text, text)] + + +def setup(app): + app.add_node(refcounting, + html=(html_visit, html_depart), + latex=(visit, depart), + text=(visit, depart), + man=(visit, depart)) + app.add_directive('refcounting', refcounting_directive) diff --git a/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst b/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst new file mode 100644 index 0000000..4cd1977 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst @@ -0,0 +1,264 @@ +*************** +Getting Started +*************** + +.. highlight:: c + +Compiling and Installing Jansson +================================ + +The Jansson source is available at +http://www.digip.org/jansson/releases/. + +Unix-like systems (including MinGW) +----------------------------------- + +Unpack the source tarball and change to the source directory: + +.. 
parsed-literal:: + + bunzip2 -c jansson-|release|.tar.bz2 | tar xf - + cd jansson-|release| + +The source uses GNU Autotools (autoconf_, automake_, libtool_), so +compiling and installing is extremely simple:: + + ./configure + make + make check + make install + +To change the destination directory (``/usr/local`` by default), use +the ``--prefix=DIR`` argument to ``./configure``. See ``./configure +--help`` for the list of all possible configuration options. + +The command ``make check`` runs the test suite distributed with +Jansson. This step is not strictly necessary, but it may find possible +problems that Jansson has on your platform. If any problems are found, +please report them. + +If you obtained the source from a Git repository (or any other source +control system), there's no ``./configure`` script as it's not kept in +version control. To create the script, the build system needs to be +bootstrapped. There are many ways to do this, but the easiest one is +to use ``autoreconf``:: + + autoreconf -fi + +This command creates the ``./configure`` script, which can then be +used as described above. + +.. _autoconf: http://www.gnu.org/software/autoconf/ +.. _automake: http://www.gnu.org/software/automake/ +.. _libtool: http://www.gnu.org/software/libtool/ + + +.. _build-cmake: + +CMake (various platforms, including Windows) +-------------------------------------------- + +Jansson can be built using CMake_. Create a build directory for an +out-of-tree build, change to that directory, and run ``cmake`` (or ``ccmake``, +``cmake-gui``, or similar) to configure the project. + +See the examples below for more detailed information. + +.. note:: In the below examples ``..`` is used as an argument for ``cmake``. + This is simply the path to the jansson project root directory. + In the example it is assumed you've created a sub-directory ``build`` + and are using that. You could use any path you want. + +.. 
_build-cmake-unix: + +Unix (Make files) +^^^^^^^^^^^^^^^^^ +Generating make files on unix: + +.. parsed-literal:: + + bunzip2 -c jansson-|release|.tar.bz2 | tar xf - + cd jansson-|release| + + mkdir build + cd build + cmake .. # or ccmake .. for a GUI. + +.. note:: + + If you don't want to build docs or ``Sphinx`` is not installed, you should add ``"-DJANSSON_BUILD_DOCS=OFF"`` in the ``cmake`` command. + + +Then to build:: + + make + make check + make install + +Windows (Visual Studio) +^^^^^^^^^^^^^^^^^^^^^^^ +Creating Visual Studio project files from the command line: + +.. parsed-literal:: + + + cd jansson-|release| + + md build + cd build + cmake -G "Visual Studio 15 2017" .. + +.. note:: + + You should replace the name of the generator (``-G`` flag) matching + the Visual Studio version installed on your system. Currently, the + following versions are supported: + + - ``Visual Studio 9 2008`` + - ``Visual Studio 10 2010`` + - ``Visual Studio 11 2012`` + - ``Visual Studio 12 2013`` + - ``Visual Studio 14 2015`` + - ``Visual Studio 15 2017`` + - ``Visual Studio 16 2019`` + + Any later version should also work. + +You will now have a *Visual Studio Solution* in your build directory. +To run the unit tests build the ``RUN_TESTS`` project. + +If you prefer a GUI the ``cmake`` line in the above example can +be replaced with:: + + cmake-gui .. + +For command line help (including a list of available generators) +for CMake_ simply run:: + + cmake + +To list available CMake_ settings (and what they are currently set to) +for the project, run:: + + cmake -LH .. + +Windows (MinGW) +^^^^^^^^^^^^^^^ +If you prefer using MinGW on Windows, make sure MinGW installed and ``{MinGW}/bin`` has been added to ``PATH``, then do the following commands: + +.. parsed-literal:: + + + cd jansson-|release| + + md build + cd build + cmake -G "MinGW Makefiles" .. + mingw32-make + + +Mac OSX (Xcode) +^^^^^^^^^^^^^^^ +If you prefer using Xcode instead of make files on OSX, +do the following. 
(Use the same steps as +for :ref:`Unix `):: + + ... + cmake -G "Xcode" .. + +Additional CMake settings +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Shared library +"""""""""""""" +By default the CMake_ project will generate build files for building the +static library. To build the shared version use:: + + ... + cmake -DJANSSON_BUILD_SHARED_LIBS=1 .. + +Changing install directory (same as autoconf --prefix) +"""""""""""""""""""""""""""""""""""""""""""""""""""""" +Just as with the autoconf_ project you can change the destination directory +for ``make install``. The equivalent for autoconfs ``./configure --prefix`` +in CMake_ is:: + + ... + cmake -DCMAKE_INSTALL_PREFIX:PATH=/some/other/path .. + make install + +.. _CMake: http://www.cmake.org + + +Android +------- + +Jansson can be built for Android platforms. Android.mk is in the +source root directory. The configuration header file is located in the +``android`` directory in the source distribution. + + +Other Systems +------------- + +On non Unix-like systems, you may be unable to run the ``./configure`` +script. In this case, follow these steps. All the files mentioned can +be found in the ``src/`` directory. + +1. Create ``jansson_config.h`` (which has some platform-specific + parameters that are normally filled in by the ``./configure`` + script). Edit ``jansson_config.h.in``, replacing all ``@variable@`` + placeholders, and rename the file to ``jansson_config.h``. + +2. Make ``jansson.h`` and ``jansson_config.h`` available to the + compiler, so that they can be found when compiling programs that + use Jansson. + +3. Compile all the ``.c`` files (in the ``src/`` directory) into a + library file. Make the library available to the compiler, as in + step 2. + + +Building the Documentation +-------------------------- + +(This subsection describes how to build the HTML documentation you are +currently reading, so it can be safely skipped.) + +Documentation is in the ``doc/`` subdirectory. 
It's written in +reStructuredText_ with Sphinx_ annotations. To generate the HTML +documentation, invoke:: + + make html + +and point your browser to ``doc/_build/html/index.html``. Sphinx_ 1.0 +or newer is required to generate the documentation. + +.. _reStructuredText: http://docutils.sourceforge.net/rst.html +.. _Sphinx: http://sphinx.pocoo.org/ + + +Compiling Programs that Use Jansson +=================================== + +Jansson involves one C header file, :file:`jansson.h`, so it's enough +to put the line + +:: + + #include + +in the beginning of every source file that uses Jansson. + +There's also just one library to link with, ``libjansson``. Compile and +link the program as follows:: + + cc -o prog prog.c -ljansson + +Starting from version 1.2, there's also support for pkg-config_: + +.. code-block:: shell + + cc -o prog prog.c `pkg-config --cflags --libs jansson` + +.. _pkg-config: http://pkg-config.freedesktop.org/ diff --git a/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c b/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c new file mode 100644 index 0000000..c020f46 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#include +#include + +#include +#include + +#define BUFFER_SIZE (256 * 1024) /* 256 KB */ + +#define URL_FORMAT "https://api.github.com/repos/%s/%s/commits" +#define URL_SIZE 256 + +/* Return the offset of the first newline in text or the length of + text if there's no newline */ +static int newline_offset(const char *text) { + const char *newline = strchr(text, '\n'); + if (!newline) + return strlen(text); + else + return (int)(newline - text); +} + +struct write_result { + char *data; + int pos; +}; + +static size_t write_response(void *ptr, size_t size, size_t nmemb, void *stream) { + struct write_result *result = (struct write_result *)stream; + + if (result->pos + size * nmemb >= BUFFER_SIZE - 1) { + fprintf(stderr, "error: too small buffer\n"); + return 0; + } + + memcpy(result->data + result->pos, ptr, size * nmemb); + result->pos += size * nmemb; + + return size * nmemb; +} + +static char *request(const char *url) { + CURL *curl = NULL; + CURLcode status; + struct curl_slist *headers = NULL; + char *data = NULL; + long code; + + curl_global_init(CURL_GLOBAL_ALL); + curl = curl_easy_init(); + if (!curl) + goto error; + + data = malloc(BUFFER_SIZE); + if (!data) + goto error; + + struct write_result write_result = {.data = data, .pos = 0}; + + curl_easy_setopt(curl, CURLOPT_URL, url); + + /* GitHub commits API v3 requires a User-Agent header */ + headers = curl_slist_append(headers, "User-Agent: Jansson-Tutorial"); + curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); + + curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_response); + curl_easy_setopt(curl, CURLOPT_WRITEDATA, &write_result); + + status = curl_easy_perform(curl); + if (status != 0) { + fprintf(stderr, "error: unable to request data from %s:\n", url); + fprintf(stderr, "%s\n", curl_easy_strerror(status)); + goto error; + } + + curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code); + if (code != 200) { + fprintf(stderr, "error: server responded with code %ld\n", code); + goto error; + 
} + + curl_easy_cleanup(curl); + curl_slist_free_all(headers); + curl_global_cleanup(); + + /* zero-terminate the result */ + data[write_result.pos] = '\0'; + + return data; + +error: + if (data) + free(data); + if (curl) + curl_easy_cleanup(curl); + if (headers) + curl_slist_free_all(headers); + curl_global_cleanup(); + return NULL; +} + +int main(int argc, char *argv[]) { + size_t i; + char *text; + char url[URL_SIZE]; + + json_t *root; + json_error_t error; + + if (argc != 3) { + fprintf(stderr, "usage: %s USER REPOSITORY\n\n", argv[0]); + fprintf(stderr, "List commits at USER's REPOSITORY.\n\n"); + return 2; + } + + snprintf(url, URL_SIZE, URL_FORMAT, argv[1], argv[2]); + + text = request(url); + if (!text) + return 1; + + root = json_loads(text, 0, &error); + free(text); + + if (!root) { + fprintf(stderr, "error: on line %d: %s\n", error.line, error.text); + return 1; + } + + if (!json_is_array(root)) { + fprintf(stderr, "error: root is not an array\n"); + json_decref(root); + return 1; + } + + for (i = 0; i < json_array_size(root); i++) { + json_t *data, *sha, *commit, *message; + const char *message_text; + + data = json_array_get(root, i); + if (!json_is_object(data)) { + fprintf(stderr, "error: commit data %d is not an object\n", (int)(i + 1)); + json_decref(root); + return 1; + } + + sha = json_object_get(data, "sha"); + if (!json_is_string(sha)) { + fprintf(stderr, "error: commit %d: sha is not a string\n", (int)(i + 1)); + return 1; + } + + commit = json_object_get(data, "commit"); + if (!json_is_object(commit)) { + fprintf(stderr, "error: commit %d: commit is not an object\n", (int)(i + 1)); + json_decref(root); + return 1; + } + + message = json_object_get(commit, "message"); + if (!json_is_string(message)) { + fprintf(stderr, "error: commit %d: message is not a string\n", (int)(i + 1)); + json_decref(root); + return 1; + } + + message_text = json_string_value(message); + printf("%.8s %.*s\n", json_string_value(sha), newline_offset(message_text), + 
message_text); + } + + json_decref(root); + return 0; +} diff --git a/solo-ckpool-source/src/jansson-2.14/doc/index.rst b/solo-ckpool-source/src/jansson-2.14/doc/index.rst new file mode 100644 index 0000000..c679f40 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/index.rst @@ -0,0 +1,53 @@ +Jansson Documentation +===================== + +This is the documentation for Jansson_ |release|, last updated |today|. + +Introduction +------------ + +Jansson_ is a C library for encoding, decoding and manipulating JSON +data. Its main features and design principles are: + +- Simple and intuitive API and data model + +- Comprehensive documentation + +- No dependencies on other libraries + +- Full Unicode support (UTF-8) + +- Extensive test suite + +Jansson is licensed under the `MIT license`_; see LICENSE in the +source distribution for details. + +Jansson is used in production and its API is stable. It works on +numerous platforms, including numerous Unix like systems and Windows. +It's suitable for use on any system, including desktop, server, and +small embedded systems. + + +.. _`MIT license`: http://www.opensource.org/licenses/mit-license.php +.. _Jansson: http://www.digip.org/jansson/ + +Contents +-------- + +.. toctree:: + :maxdepth: 2 + + gettingstarted + upgrading + tutorial + conformance + threadsafety + apiref + changes + + +Indices and Tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst b/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst new file mode 100644 index 0000000..0eebb29 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst @@ -0,0 +1,82 @@ +.. _thread-safety: + +************* +Thread safety +************* + +Jansson as a library is thread safe and has no mutable global state. +The only exceptions are the hash function seed and memory allocation +functions, see below. + +There's no locking performed inside Jansson's code. 
**Read-only** +access to JSON values shared by multiple threads is safe, but +**mutating** a JSON value that's shared by multiple threads is not. A +multithreaded program must perform its own locking if JSON values +shared by multiple threads are mutated. + +However, **reference count manipulation** (:func:`json_incref()`, +:func:`json_decref()`) is usually thread-safe, and can be performed on +JSON values that are shared among threads. The thread-safety of +reference counting can be checked with the +``JANSSON_THREAD_SAFE_REFCOUNT`` preprocessor constant. Thread-safe +reference count manipulation is achieved using compiler built-in +atomic functions, which are available in most modern compilers. + +If compiler support is not available (``JANSSON_THREAD_SAFE_REFCOUNT`` +is not defined), it may be very difficult to ensure thread safety of +reference counting. It's possible to have a reference to a value +that's also stored inside an array or object in another thread. +Modifying the container (adding or removing values) may trigger +concurrent access to such values, as containers manage the reference +count of their contained values. + + +Hash function seed +================== + +To prevent an attacker from intentionally causing large JSON objects +with specially crafted keys to perform very slowly, the hash function +used by Jansson is randomized using a seed value. The seed is +automatically generated on the first explicit or implicit call to +:func:`json_object()`, if :func:`json_object_seed()` has not been +called beforehand. + +The seed is generated by using the operating system's entropy sources if +they are available (``/dev/urandom``, ``CryptGenRandom()``). The +initialization is done in as thread-safe a manner as possible, by using +architecture specific lockless operations if provided by the platform +or the compiler.
+ +If you're using threads, it's recommended to autoseed the hashtable +explicitly before spawning any threads by calling +``json_object_seed(0)`` , especially if you're unsure whether the +initialization is thread safe on your platform. + + +Memory allocation functions +=========================== + +Memory allocation functions should be set at most once, and only on +program startup. See :ref:`apiref-custom-memory-allocation`. + + +Locale +====== + +Jansson works fine under any locale. + +However, if the host program is multithreaded and uses ``setlocale()`` +to switch the locale in one thread while Jansson is currently encoding +or decoding JSON data in another thread, the result may be wrong or +the program may even crash. + +Jansson uses locale specific functions for certain string conversions +in the encoder and decoder, and then converts the locale specific +values to/from the JSON representation. This fails if the locale +changes between the string conversion and the locale-to-JSON +conversion. This can only happen in multithreaded programs that use +``setlocale()``, because ``setlocale()`` switches the locale for all +running threads, not only the thread that calls ``setlocale()``. + +If your program uses ``setlocale()`` as described above, consider +using the thread-safe ``uselocale()`` instead. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst b/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst new file mode 100644 index 0000000..bb7a6c2 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst @@ -0,0 +1,288 @@ +.. _tutorial: + +******** +Tutorial +******** + +.. highlight:: c + +In this tutorial, we create a program that fetches the latest commits +of a repository in GitHub_ over the web. `GitHub API`_ uses JSON, so +the result can be parsed using Jansson. + +To stick to the scope of this tutorial, we will only cover the +parts of the program related to handling JSON data. 
For the best user +experience, the full source code is available: +:download:`github_commits.c`. To compile it (on Unix-like systems with +gcc), use the following command:: + + gcc -o github_commits github_commits.c -ljansson -lcurl + +libcurl_ is used to communicate over the web, so it is required to +compile the program. + +The command line syntax is:: + + github_commits USER REPOSITORY + +``USER`` is a GitHub user ID and ``REPOSITORY`` is the repository +name. Please note that the GitHub API is rate limited, so if you run +the program too many times within a short period of time, the sever +starts to respond with an error. + +.. _GitHub: https://github.com/ +.. _GitHub API: http://developer.github.com/ +.. _libcurl: http://curl.haxx.se/ + + +.. _tutorial-github-commits-api: + +The GitHub Repo Commits API +=========================== + +The `GitHub Repo Commits API`_ is used by sending HTTP requests to +URLs like ``https://api.github.com/repos/USER/REPOSITORY/commits``, +where ``USER`` and ``REPOSITORY`` are the GitHub user ID and the name +of the repository whose commits are to be listed, respectively. + +GitHub responds with a JSON array of the following form: + +.. code-block:: none + + [ + { + "sha": "", + "commit": { + "message": "", + + }, + + }, + { + "sha": "", + "commit": { + "message": "", + + }, + + }, + + ] + +In our program, the HTTP request is sent using the following +function:: + + static char *request(const char *url); + +It takes the URL as a parameter, performs a HTTP GET request, and +returns a newly allocated string that contains the response body. If +the request fails, an error message is printed to stderr and the +return value is *NULL*. For full details, refer to :download:`the code +`, as the actual implementation is not important +here. + +.. _GitHub Repo Commits API: http://developer.github.com/v3/repos/commits/ + +.. 
_tutorial-the-program: + +The Program +=========== + +First the includes:: + + #include + #include + +Like all the programs using Jansson, we need to include +:file:`jansson.h`. + +The following definitions are used to build the GitHub API request +URL:: + + #define URL_FORMAT "https://api.github.com/repos/%s/%s/commits" + #define URL_SIZE 256 + +The following function is used when formatting the result to find the +first newline in the commit message:: + + /* Return the offset of the first newline in text or the length of + text if there's no newline */ + static int newline_offset(const char *text) + { + const char *newline = strchr(text, '\n'); + if(!newline) + return strlen(text); + else + return (int)(newline - text); + } + +The main function follows. In the beginning, we first declare a bunch +of variables and check the command line parameters:: + + int main(int argc, char *argv[]) + { + size_t i; + char *text; + char url[URL_SIZE]; + + json_t *root; + json_error_t error; + + if(argc != 3) + { + fprintf(stderr, "usage: %s USER REPOSITORY\n\n", argv[0]); + fprintf(stderr, "List commits at USER's REPOSITORY.\n\n"); + return 2; + } + +Then we build the request URL using the user and repository names +given as command line parameters:: + + snprintf(url, URL_SIZE, URL_FORMAT, argv[1], argv[2]); + +This uses the ``URL_SIZE`` and ``URL_FORMAT`` constants defined above. +Now we're ready to actually request the JSON data over the web:: + + text = request(url); + if(!text) + return 1; + +If an error occurs, our function ``request`` prints the error and +returns *NULL*, so it's enough to just return 1 from the main +function. + +Next we'll call :func:`json_loads()` to decode the JSON text we got +as a response:: + + root = json_loads(text, 0, &error); + free(text); + + if(!root) + { + fprintf(stderr, "error: on line %d: %s\n", error.line, error.text); + return 1; + } + +We don't need the JSON text anymore, so we can free the ``text`` +variable right after decoding it. 
If :func:`json_loads()` fails, it +returns *NULL* and sets error information to the :type:`json_error_t` +structure given as the second parameter. In this case, our program +prints the error information out and returns 1 from the main function. + +Now we're ready to extract the data out of the decoded JSON response. +The structure of the response JSON was explained in section +:ref:`tutorial-github-commits-api`. + +We check that the returned value really is an array:: + + if(!json_is_array(root)) + { + fprintf(stderr, "error: root is not an array\n"); + json_decref(root); + return 1; + } + +Then we proceed to loop over all the commits in the array:: + + for(i = 0; i < json_array_size(root); i++) + { + json_t *data, *sha, *commit, *message; + const char *message_text; + + data = json_array_get(root, i); + if(!json_is_object(data)) + { + fprintf(stderr, "error: commit data %d is not an object\n", i + 1); + json_decref(root); + return 1; + } + ... + +The function :func:`json_array_size()` returns the size of a JSON +array. First, we again declare some variables and then extract the +i'th element of the ``root`` array using :func:`json_array_get()`. +We also check that the resulting value is a JSON object. + +Next we'll extract the commit ID (a hexadecimal SHA-1 sum), +intermediate commit info object, and the commit message from that +object. We also do proper type checks:: + + sha = json_object_get(data, "sha"); + if(!json_is_string(sha)) + { + fprintf(stderr, "error: commit %d: sha is not a string\n", i + 1); + json_decref(root); + return 1; + } + + commit = json_object_get(data, "commit"); + if(!json_is_object(commit)) + { + fprintf(stderr, "error: commit %d: commit is not an object\n", i + 1); + json_decref(root); + return 1; + } + + message = json_object_get(commit, "message"); + if(!json_is_string(message)) + { + fprintf(stderr, "error: commit %d: message is not a string\n", i + 1); + json_decref(root); + return 1; + } + ... 
+ +And finally, we'll print the first 8 characters of the commit ID and +the first line of the commit message. A C-style string is extracted +from a JSON string using :func:`json_string_value()`:: + + message_text = json_string_value(message); + printf("%.8s %.*s\n", + json_string_value(sha), + newline_offset(message_text), + message_text); + } + +After sending the HTTP request, we decoded the JSON text using +:func:`json_loads()`, remember? It returns a *new reference* to the +JSON value it decodes. When we're finished with the value, we'll need +to decrease the reference count using :func:`json_decref()`. This way +Jansson can release the resources:: + + json_decref(root); + return 0; + +For a detailed explanation of reference counting in Jansson, see +:ref:`apiref-reference-count` in :ref:`apiref`. + +The program's ready, let's test it and view the latest commits in +Jansson's repository: + +.. code-block:: shell + + $ ./github_commits akheron jansson + 1581f26a Merge branch '2.3' + aabfd493 load: Change buffer_pos to be a size_t + bd72efbd load: Avoid unexpected behaviour in macro expansion + e8fd3e30 Document and tweak json_load_callback() + 873eddaf Merge pull request #60 from rogerz/contrib + bd2c0c73 Ignore the binary test_load_callback + 17a51a4b Merge branch '2.3' + 09c39adc Add json_load_callback to the list of exported symbols + cbb80baf Merge pull request #57 from rogerz/contrib + 040bd7b0 Add json_load_callback() + 2637faa4 Make test stripping locale independent + <...> + + +Conclusion +========== + +In this tutorial, we implemented a program that fetches the latest +commits of a GitHub repository using the GitHub Repo Commits API. +Jansson was used to decode the JSON response and to extract the commit +data. + +This tutorial only covered a small part of Jansson. For example, we +did not create or manipulate JSON values at all. Proceed to +:ref:`apiref` to explore all features of Jansson. 
diff --git a/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst b/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst new file mode 100644 index 0000000..94ff7de --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst @@ -0,0 +1,76 @@ +.. highlight:: c + +****************** +Upgrading from 1.x +****************** + +This chapter lists the backwards incompatible changes introduced in +Jansson 2.0, and the steps that are needed for upgrading your code. + +**The incompatibilities are not dramatic.** The biggest change is that +all decoding functions now require an extra parameter. Most programs +can be modified to work with 2.0 by adding a ``0`` as the second +parameter to all calls of :func:`json_loads()`, :func:`json_loadf()` +and :func:`json_load_file()`. + + +Compatibility +============= + +Jansson 2.0 is backwards incompatible with the Jansson 1.x releases. +It is ABI incompatible, i.e. all programs dynamically linking to the +Jansson library need to be recompiled. It's also API incompatible, +i.e. the source code of programs using Jansson 1.x may need +modifications to make them compile against Jansson 2.0. + +All the 2.x releases are guaranteed to be backwards compatible for +both ABI and API, so no recompilation or source changes are needed +when upgrading from 2.x to 2.y. + + +List of Incompatible Changes +============================ + +**Decoding flags** + For future needs, a ``flags`` parameter was added as the second + parameter to all decoding functions, i.e. :func:`json_loads()`, + :func:`json_loadf()` and :func:`json_load_file()`. All calls to + these functions need to be changed by adding a ``0`` as the second + argument. For example:: + + /* old code */ + json_loads(input, &error); + + /* new code */ + json_loads(input, 0, &error); + + +**Underlying type of JSON integers** + The underlying C type of JSON integers has been changed from + ``int`` to the widest available signed integer type, i.e.
+ ``long long`` or ``long``, depending on whether + ``long long`` is supported on your system or not. This makes + the whole 64-bit integer range available on most modern systems. + + ``jansson.h`` has a typedef :type:`json_int_t` to the underlying + integer type. ``int`` should still be used in most cases when + dealing with smallish JSON integers, as the compiler handles + implicit type coercion. Only when the full 64-bit range is needed, + :type:`json_int_t` should be explicitly used. + + +**Maximum encoder indentation depth** + The maximum argument of the ``JSON_INDENT()`` macro has been + changed from 255 to 31, to free up bits from the ``flags`` + parameter of :func:`json_dumps()`, :func:`json_dumpf()` and + :func:`json_dump_file()`. If your code uses a bigger indentation + than 31, it needs to be changed. + + +**Unsigned integers in API functions** + Version 2.0 unifies unsigned integer usage in the API. All uses of + ``unsigned int`` and ``unsigned long`` have been replaced + with ``size_t``. This includes flags, container sizes, etc. + This should not require source code changes, as both + ``unsigned int`` and ``unsigned long`` are usually + compatible with ``size_t``. diff --git a/solo-ckpool-source/src/jansson-2.14/examples/README.rst b/solo-ckpool-source/src/jansson-2.14/examples/README.rst new file mode 100644 index 0000000..a7c5274 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/examples/README.rst @@ -0,0 +1,4 @@ +Jansson examples +================ + +This directory contains simple example programs that use Jansson. diff --git a/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c b/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c new file mode 100644 index 0000000..a96a0f8 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c @@ -0,0 +1,200 @@ +/* + * Simple example of parsing and printing JSON using jansson. 
+ * + * SYNOPSIS: + * $ examples/simple_parse + * Type some JSON > [true, false, null, 1, 0.0, -0.0, "", {"name": "barney"}] + * JSON Array of 8 elements: + * JSON True + * JSON False + * JSON Null + * JSON Integer: "1" + * JSON Real: 0.000000 + * JSON Real: -0.000000 + * JSON String: "" + * JSON Object of 1 pair: + * JSON Key: "name" + * JSON String: "barney" + * + * Copyright (c) 2014 Robert Poor + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include +#include + +/* forward refs */ +void print_json(json_t *root); +void print_json_aux(json_t *element, int indent); +void print_json_indent(int indent); +const char *json_plural(size_t count); +void print_json_object(json_t *element, int indent); +void print_json_array(json_t *element, int indent); +void print_json_string(json_t *element, int indent); +void print_json_integer(json_t *element, int indent); +void print_json_real(json_t *element, int indent); +void print_json_true(json_t *element, int indent); +void print_json_false(json_t *element, int indent); +void print_json_null(json_t *element, int indent); + +void print_json(json_t *root) { print_json_aux(root, 0); } + +void print_json_aux(json_t *element, int indent) { + switch (json_typeof(element)) { + case JSON_OBJECT: + print_json_object(element, indent); + break; + case JSON_ARRAY: + print_json_array(element, indent); + break; + case JSON_STRING: + print_json_string(element, indent); + break; + case JSON_INTEGER: + print_json_integer(element, indent); + break; + case JSON_REAL: + print_json_real(element, indent); + break; + case JSON_TRUE: + print_json_true(element, indent); + break; + case JSON_FALSE: + print_json_false(element, indent); + break; + case JSON_NULL: + print_json_null(element, indent); + break; + default: + fprintf(stderr, "unrecognized JSON type %d\n", json_typeof(element)); + } +} + +void print_json_indent(int indent) { + int i; + 
for (i = 0; i < indent; i++) { + putchar(' '); + } +} + +const char *json_plural(size_t count) { return count == 1 ? "" : "s"; } + +void print_json_object(json_t *element, int indent) { + size_t size; + const char *key; + json_t *value; + + print_json_indent(indent); + size = json_object_size(element); + + printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size)); + json_object_foreach(element, key, value) { + print_json_indent(indent + 2); + printf("JSON Key: \"%s\"\n", key); + print_json_aux(value, indent + 2); + } +} + +void print_json_array(json_t *element, int indent) { + size_t i; + size_t size = json_array_size(element); + print_json_indent(indent); + + printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size)); + for (i = 0; i < size; i++) { + print_json_aux(json_array_get(element, i), indent + 2); + } +} + +void print_json_string(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON String: \"%s\"\n", json_string_value(element)); +} + +void print_json_integer(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element)); +} + +void print_json_real(json_t *element, int indent) { + print_json_indent(indent); + printf("JSON Real: %f\n", json_real_value(element)); +} + +void print_json_true(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON True\n"); +} + +void print_json_false(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON False\n"); +} + +void print_json_null(json_t *element, int indent) { + (void)element; + print_json_indent(indent); + printf("JSON Null\n"); +} + +/* + * Parse text into a JSON object. If text is valid JSON, returns a + * json_t structure, otherwise prints and error and returns null. 
+ */ +json_t *load_json(const char *text) { + json_t *root; + json_error_t error; + + root = json_loads(text, 0, &error); + + if (root) { + return root; + } else { + fprintf(stderr, "json error on line %d: %s\n", error.line, error.text); + return (json_t *)0; + } +} + +/* + * Print a prompt and return (by reference) a null-terminated line of + * text. Returns NULL on eof or some error. + */ +char *read_line(char *line, int max_chars) { + printf("Type some JSON > "); + fflush(stdout); + return fgets(line, max_chars, stdin); +} + +/* ================================================================ + * main + */ + +#define MAX_CHARS 4096 + +int main(int argc, char *argv[]) { + char line[MAX_CHARS]; + + if (argc != 1) { + fprintf(stderr, "Usage: %s\n", argv[0]); + exit(-1); + } + + while (read_line(line, MAX_CHARS) != (char *)NULL) { + + /* parse text into JSON structure */ + json_t *root = load_json(line); + + if (root) { + /* print and release the JSON structure */ + print_json(root); + json_decref(root); + } + } + + return 0; +} diff --git a/solo-ckpool-source/src/jansson-2.14/jansson.pc.in b/solo-ckpool-source/src/jansson-2.14/jansson.pc.in new file mode 100644 index 0000000..69c9a43 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/jansson.pc.in @@ -0,0 +1,10 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: Jansson +Description: Library for encoding, decoding and manipulating JSON data +Version: @VERSION@ +Libs: -L${libdir} -ljansson +Cflags: -I${includedir} diff --git a/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in b/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in new file mode 100644 index 0000000..b8f5097 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in @@ -0,0 +1,160 @@ +/* jansson_private_config.h.in. Generated from configure.ac by autoheader. 
*/ + +/* Define to 1 if gcc's __atomic builtins are available */ +#undef HAVE_ATOMIC_BUILTINS + +/* Define to 1 if you have the `close' function. */ +#undef HAVE_CLOSE + +/* Define to 1 if you have the header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_ENDIAN_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_FCNTL_H + +/* Define to 1 if you have the `getpid' function. */ +#undef HAVE_GETPID + +/* Define to 1 if you have the `gettimeofday' function. */ +#undef HAVE_GETTIMEOFDAY + +/* Define to 1 if you have the header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the `localeconv' function. */ +#undef HAVE_LOCALECONV + +/* Define to 1 if you have the header file. */ +#undef HAVE_LOCALE_H + +/* Define to 1 if the system has the type `long long int'. */ +#undef HAVE_LONG_LONG_INT + +/* Define to 1 if you have the `open' function. */ +#undef HAVE_OPEN + +/* Define to 1 if you have the `read' function. */ +#undef HAVE_READ + +/* Define to 1 if you have the header file. */ +#undef HAVE_SCHED_H + +/* Define to 1 if you have the `sched_yield' function. */ +#undef HAVE_SCHED_YIELD + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDIO_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the `strtoll' function. */ +#undef HAVE_STRTOLL + +/* Define to 1 if gcc's __sync builtins are available */ +#undef HAVE_SYNC_BUILTINS + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_PARAM_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_TIME_H + +/* Define to 1 if you have the header file. 
*/ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the header file. */ +#undef HAVE_UNISTD_H + +/* Define to 1 if the system has the type `unsigned long long int'. */ +#undef HAVE_UNSIGNED_LONG_LONG_INT + +/* Number of buckets new object hashtables contain is 2 raised to this power. + E.g. 3 -> 2^3 = 8. */ +#undef INITIAL_HASHTABLE_ORDER + +/* Define to the sub-directory where libtool stores uninstalled libraries. */ +#undef LT_OBJDIR + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to 1 if all of the C90 standard headers exist (not just the ones + required in a freestanding environment). This macro is provided for + backward compatibility; new code need not use it. */ +#undef STDC_HEADERS + +/* Define to 1 if /dev/urandom should be used for seeding the hash function */ +#undef USE_URANDOM + +/* Define to 1 if CryptGenRandom should be used for seeding the hash function + */ +#undef USE_WINDOWS_CRYPTOAPI + +/* Version number of package */ +#undef VERSION + +/* Define for Solaris 2.5.1 so the uint32_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +#undef _UINT32_T + +/* Define for Solaris 2.5.1 so the uint8_t typedef from , + , or is not used. If the typedef were allowed, the + #define below would cause a syntax error. */ +#undef _UINT8_T + +/* Define to `__inline__' or `__inline' if that's what the C compiler + calls it, or to nothing if 'inline' is not supported under any name. 
*/ +#ifndef __cplusplus +#undef inline +#endif + +/* Define to the type of a signed integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +#undef int32_t + +/* Define to the type of an unsigned integer type of width exactly 16 bits if + such a type exists and the standard includes do not define it. */ +#undef uint16_t + +/* Define to the type of an unsigned integer type of width exactly 32 bits if + such a type exists and the standard includes do not define it. */ +#undef uint32_t + +/* Define to the type of an unsigned integer type of width exactly 8 bits if + such a type exists and the standard includes do not define it. */ +#undef uint8_t diff --git a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format new file mode 100755 index 0000000..d46056c --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format @@ -0,0 +1,3 @@ +#!/bin/bash + +find . -type f -a '(' -name '*.c' -o -name '*.h' ')' | xargs clang-format -i diff --git a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check new file mode 100755 index 0000000..983e55d --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check @@ -0,0 +1,27 @@ +#!/bin/bash + +CLANG_FORMAT=${CLANG_FORMAT:-clang-format} +CLANG_FORMAT_VERSION=${CLANG_FORMAT_VERSION:-} + +if ! type $CLANG_FORMAT >/dev/null || \ + ! $CLANG_FORMAT --version | grep -q "version ${CLANG_FORMAT_VERSION}"; then + # If running tests, mark this test as skipped. + exit 77 +fi + +errors=0 +paths=$(git ls-files | grep '\.[ch]$') +for path in $paths; do + in=$(cat $path) + out=$($CLANG_FORMAT $path) + + if [ "$in" != "$out" ]; then + diff -u -L $path -L "$path.formatted" $path - <<<$out + errors=1 + fi +done + +if [ $errors -ne 0 ]; then + echo "Formatting errors detected, run ./scripts/clang-format to fix!" 
+ exit 1 +fi diff --git a/solo-ckpool-source/src/jansson-2.14/src/Makefile.am b/solo-ckpool-source/src/jansson-2.14/src/Makefile.am new file mode 100644 index 0000000..63eda32 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/Makefile.am @@ -0,0 +1,30 @@ +EXTRA_DIST = jansson.def + +include_HEADERS = jansson.h +nodist_include_HEADERS = jansson_config.h + +lib_LTLIBRARIES = libjansson.la +libjansson_la_SOURCES = \ + dump.c \ + error.c \ + hashtable.c \ + hashtable.h \ + hashtable_seed.c \ + jansson_private.h \ + load.c \ + lookup3.h \ + memory.c \ + pack_unpack.c \ + strbuffer.c \ + strbuffer.h \ + strconv.c \ + utf.c \ + utf.h \ + value.c \ + version.c +libjansson_la_LDFLAGS = \ + -no-undefined \ + -export-symbols-regex '^json_|^jansson_' \ + -version-info 18:0:14 \ + @JSON_SYMVER_LDFLAGS@ \ + @JSON_BSYMBOLIC_LDFLAGS@ diff --git a/solo-ckpool-source/src/jansson-2.14/src/dump.c b/solo-ckpool-source/src/jansson-2.14/src/dump.c new file mode 100644 index 0000000..a86068b --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/dump.c @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "jansson_private.h" + +#include +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif + +#include "jansson.h" +#include "strbuffer.h" +#include "utf.h" + +#define MAX_INTEGER_STR_LENGTH 100 +#define MAX_REAL_STR_LENGTH 100 + +#define FLAGS_TO_INDENT(f) ((f)&0x1F) +#define FLAGS_TO_PRECISION(f) (((f) >> 11) & 0x1F) + +struct buffer { + const size_t size; + size_t used; + char *data; +}; + +static int dump_to_strbuffer(const char *buffer, size_t size, void *data) { + return strbuffer_append_bytes((strbuffer_t *)data, buffer, size); +} + +static int dump_to_buffer(const char *buffer, size_t size, void *data) { + struct buffer *buf = (struct buffer *)data; + + if (buf->used + size <= buf->size) + memcpy(&buf->data[buf->used], buffer, size); + + buf->used += size; + return 0; +} + +static int dump_to_file(const char *buffer, size_t size, void *data) { + FILE *dest = (FILE *)data; + if (fwrite(buffer, size, 1, dest) != 1) + return -1; + return 0; +} + +static int dump_to_fd(const char *buffer, size_t size, void *data) { +#ifdef HAVE_UNISTD_H + int *dest = (int *)data; + if (write(*dest, buffer, size) == (ssize_t)size) + return 0; +#endif + return -1; +} + +/* 32 spaces (the maximum indentation size) */ +static const char whitespace[] = " "; + +static int dump_indent(size_t flags, int depth, int space, json_dump_callback_t dump, + void *data) { + if (FLAGS_TO_INDENT(flags) > 0) { + unsigned int ws_count = FLAGS_TO_INDENT(flags), n_spaces = depth * ws_count; + + if (dump("\n", 1, data)) + return -1; + + while (n_spaces > 0) { + int cur_n = + n_spaces < sizeof whitespace - 1 ? 
n_spaces : sizeof whitespace - 1; + + if (dump(whitespace, cur_n, data)) + return -1; + + n_spaces -= cur_n; + } + } else if (space && !(flags & JSON_COMPACT)) { + return dump(" ", 1, data); + } + return 0; +} + +static int dump_string(const char *str, size_t len, json_dump_callback_t dump, void *data, + size_t flags) { + const char *pos, *end, *lim; + int32_t codepoint = 0; + + if (dump("\"", 1, data)) + return -1; + + end = pos = str; + lim = str + len; + while (1) { + const char *text; + char seq[13]; + int length; + + while (end < lim) { + end = utf8_iterate(pos, lim - pos, &codepoint, flags & JSON_NO_UTF8); + if (!end) + return -1; + + /* mandatory escape or control char */ + if (codepoint == '\\' || codepoint == '"' || codepoint < 0x20) + break; + + /* slash */ + if ((flags & JSON_ESCAPE_SLASH) && codepoint == '/') + break; + + /* non-ASCII */ + if ((flags & JSON_ENSURE_ASCII) && codepoint > 0x7F) + break; + + pos = end; + } + + if (pos != str) { + if (dump(str, pos - str, data)) + return -1; + } + + if (end == pos) + break; + + /* handle \, /, ", and control codes */ + length = 2; + switch (codepoint) { + case '\\': + text = "\\\\"; + break; + case '\"': + text = "\\\""; + break; + case '\b': + text = "\\b"; + break; + case '\f': + text = "\\f"; + break; + case '\n': + text = "\\n"; + break; + case '\r': + text = "\\r"; + break; + case '\t': + text = "\\t"; + break; + case '/': + text = "\\/"; + break; + default: { + /* codepoint is in BMP */ + if (codepoint < 0x10000) { + snprintf(seq, sizeof(seq), "\\u%04X", (unsigned int)codepoint); + length = 6; + } + + /* not in BMP -> construct a UTF-16 surrogate pair */ + else { + int32_t first, last; + + codepoint -= 0x10000; + first = 0xD800 | ((codepoint & 0xffc00) >> 10); + last = 0xDC00 | (codepoint & 0x003ff); + + snprintf(seq, sizeof(seq), "\\u%04X\\u%04X", (unsigned int)first, + (unsigned int)last); + length = 12; + } + + text = seq; + break; + } + } + + if (dump(text, length, data)) + return -1; + + str = pos 
= end; + } + + return dump("\"", 1, data); +} + +struct key_len { + const char *key; + int len; +}; + +static int compare_keys(const void *key1, const void *key2) { + const struct key_len *k1 = key1; + const struct key_len *k2 = key2; + const size_t min_size = k1->len < k2->len ? k1->len : k2->len; + int res = memcmp(k1->key, k2->key, min_size); + + if (res) + return res; + + return k1->len - k2->len; +} + +static int do_dump(const json_t *json, size_t flags, int depth, hashtable_t *parents, + json_dump_callback_t dump, void *data) { + int embed = flags & JSON_EMBED; + + flags &= ~JSON_EMBED; + + if (!json) + return -1; + + switch (json_typeof(json)) { + case JSON_NULL: + return dump("null", 4, data); + + case JSON_TRUE: + return dump("true", 4, data); + + case JSON_FALSE: + return dump("false", 5, data); + + case JSON_INTEGER: { + char buffer[MAX_INTEGER_STR_LENGTH]; + int size; + + size = snprintf(buffer, MAX_INTEGER_STR_LENGTH, "%" JSON_INTEGER_FORMAT, + json_integer_value(json)); + if (size < 0 || size >= MAX_INTEGER_STR_LENGTH) + return -1; + + return dump(buffer, size, data); + } + + case JSON_REAL: { + char buffer[MAX_REAL_STR_LENGTH]; + int size; + double value = json_real_value(json); + + size = jsonp_dtostr(buffer, MAX_REAL_STR_LENGTH, value, + FLAGS_TO_PRECISION(flags)); + if (size < 0) + return -1; + + return dump(buffer, size, data); + } + + case JSON_STRING: + return dump_string(json_string_value(json), json_string_length(json), dump, + data, flags); + + case JSON_ARRAY: { + size_t n; + size_t i; + /* Space for "0x", double the sizeof a pointer for the hex and a + * terminator. */ + char key[2 + (sizeof(json) * 2) + 1]; + size_t key_len; + + /* detect circular references */ + if (jsonp_loop_check(parents, json, key, sizeof(key), &key_len)) + return -1; + + n = json_array_size(json); + + if (!embed && dump("[", 1, data)) + return -1; + if (n == 0) { + hashtable_del(parents, key, key_len); + return embed ? 
0 : dump("]", 1, data); + } + if (dump_indent(flags, depth + 1, 0, dump, data)) + return -1; + + for (i = 0; i < n; ++i) { + if (do_dump(json_array_get(json, i), flags, depth + 1, parents, dump, + data)) + return -1; + + if (i < n - 1) { + if (dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + return -1; + } else { + if (dump_indent(flags, depth, 0, dump, data)) + return -1; + } + } + + hashtable_del(parents, key, key_len); + return embed ? 0 : dump("]", 1, data); + } + + case JSON_OBJECT: { + void *iter; + const char *separator; + int separator_length; + char loop_key[LOOP_KEY_LEN]; + size_t loop_key_len; + + if (flags & JSON_COMPACT) { + separator = ":"; + separator_length = 1; + } else { + separator = ": "; + separator_length = 2; + } + + /* detect circular references */ + if (jsonp_loop_check(parents, json, loop_key, sizeof(loop_key), + &loop_key_len)) + return -1; + + iter = json_object_iter((json_t *)json); + + if (!embed && dump("{", 1, data)) + return -1; + if (!iter) { + hashtable_del(parents, loop_key, loop_key_len); + return embed ? 
0 : dump("}", 1, data); + } + if (dump_indent(flags, depth + 1, 0, dump, data)) + return -1; + + if (flags & JSON_SORT_KEYS) { + struct key_len *keys; + size_t size, i; + + size = json_object_size(json); + keys = jsonp_malloc(size * sizeof(struct key_len)); + if (!keys) + return -1; + + i = 0; + while (iter) { + struct key_len *keylen = &keys[i]; + + keylen->key = json_object_iter_key(iter); + keylen->len = json_object_iter_key_len(iter); + + iter = json_object_iter_next((json_t *)json, iter); + i++; + } + assert(i == size); + + qsort(keys, size, sizeof(struct key_len), compare_keys); + + for (i = 0; i < size; i++) { + const struct key_len *key; + json_t *value; + + key = &keys[i]; + value = json_object_getn(json, key->key, key->len); + assert(value); + + dump_string(key->key, key->len, dump, data, flags); + if (dump(separator, separator_length, data) || + do_dump(value, flags, depth + 1, parents, dump, data)) { + jsonp_free(keys); + return -1; + } + + if (i < size - 1) { + if (dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) { + jsonp_free(keys); + return -1; + } + } else { + if (dump_indent(flags, depth, 0, dump, data)) { + jsonp_free(keys); + return -1; + } + } + } + + jsonp_free(keys); + } else { + /* Don't sort keys */ + + while (iter) { + void *next = json_object_iter_next((json_t *)json, iter); + const char *key = json_object_iter_key(iter); + const size_t key_len = json_object_iter_key_len(iter); + + dump_string(key, key_len, dump, data, flags); + if (dump(separator, separator_length, data) || + do_dump(json_object_iter_value(iter), flags, depth + 1, parents, + dump, data)) + return -1; + + if (next) { + if (dump(",", 1, data) || + dump_indent(flags, depth + 1, 1, dump, data)) + return -1; + } else { + if (dump_indent(flags, depth, 0, dump, data)) + return -1; + } + + iter = next; + } + } + + hashtable_del(parents, loop_key, loop_key_len); + return embed ? 
0 : dump("}", 1, data); + } + + default: + /* not reached */ + return -1; + } +} + +char *json_dumps(const json_t *json, size_t flags) { + strbuffer_t strbuff; + char *result; + + if (strbuffer_init(&strbuff)) + return NULL; + + if (json_dump_callback(json, dump_to_strbuffer, (void *)&strbuff, flags)) + result = NULL; + else if (flags & JSON_EOL) + result = jsonp_eolstrsteal(&strbuff); + else + result = jsonp_strsteal(&strbuff); + + return result; +} + +size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags) { + struct buffer buf = {size, 0, buffer}; + + if (json_dump_callback(json, dump_to_buffer, (void *)&buf, flags)) + return 0; + + return buf.used; +} + +int json_dumpf(const json_t *json, FILE *output, size_t flags) { + return json_dump_callback(json, dump_to_file, (void *)output, flags); +} + +int json_dumpfd(const json_t *json, int output, size_t flags) { + return json_dump_callback(json, dump_to_fd, (void *)&output, flags); +} + +int json_dump_file(const json_t *json, const char *path, size_t flags) { + int result; + + FILE *output = fopen(path, "w"); + if (!output) + return -1; + + result = json_dumpf(json, output, flags); + + if (fclose(output) != 0) + return -1; + + return result; +} + +int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, + size_t flags) { + int res; + hashtable_t parents_set; + + if (!(flags & JSON_ENCODE_ANY)) { + if (!json_is_array(json) && !json_is_object(json)) + return -1; + } + + if (hashtable_init(&parents_set)) + return -1; + res = do_dump(json, flags, 0, &parents_set, callback, data); + hashtable_close(&parents_set); + + return res; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/error.c b/solo-ckpool-source/src/jansson-2.14/src/error.c new file mode 100644 index 0000000..14d0047 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/error.c @@ -0,0 +1,59 @@ +#include "jansson_private.h" +#include + +void jsonp_error_init(json_error_t *error, const char 
*source) { + if (error) { + error->text[0] = '\0'; + error->line = -1; + error->column = -1; + error->position = 0; + if (source) + jsonp_error_set_source(error, source); + else + error->source[0] = '\0'; + } +} + +void jsonp_error_set_source(json_error_t *error, const char *source) { + size_t length; + + if (!error || !source) + return; + + length = strlen(source); + if (length < JSON_ERROR_SOURCE_LENGTH) + strncpy(error->source, source, length + 1); + else { + size_t extra = length - JSON_ERROR_SOURCE_LENGTH + 4; + memcpy(error->source, "...", 3); + strncpy(error->source + 3, source + extra, length - extra + 1); + } +} + +void jsonp_error_set(json_error_t *error, int line, int column, size_t position, + enum json_error_code code, const char *msg, ...) { + va_list ap; + + va_start(ap, msg); + jsonp_error_vset(error, line, column, position, code, msg, ap); + va_end(ap); +} + +void jsonp_error_vset(json_error_t *error, int line, int column, size_t position, + enum json_error_code code, const char *msg, va_list ap) { + if (!error) + return; + + if (error->text[0] != '\0') { + /* error already set */ + return; + } + + error->line = line; + error->column = column; + error->position = (int)position; + + vsnprintf(error->text, JSON_ERROR_TEXT_LENGTH - 1, msg, ap); + error->text[JSON_ERROR_TEXT_LENGTH - 2] = '\0'; + error->text[JSON_ERROR_TEXT_LENGTH - 1] = code; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable.c b/solo-ckpool-source/src/jansson-2.14/src/hashtable.c new file mode 100644 index 0000000..1508d74 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/hashtable.c @@ -0,0 +1,340 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#if HAVE_CONFIG_H +#include +#endif + +#include +#include + +#if HAVE_STDINT_H +#include +#endif + +#include "hashtable.h" +#include "jansson_private.h" /* for container_of() */ +#include /* for JSON_INLINE */ + +#ifndef INITIAL_HASHTABLE_ORDER +#define INITIAL_HASHTABLE_ORDER 3 +#endif + +typedef struct hashtable_list list_t; +typedef struct hashtable_pair pair_t; +typedef struct hashtable_bucket bucket_t; + +extern volatile uint32_t hashtable_seed; + +/* Implementation of the hash function */ +#include "lookup3.h" + +#define list_to_pair(list_) container_of(list_, pair_t, list) +#define ordered_list_to_pair(list_) container_of(list_, pair_t, ordered_list) +#define hash_str(key, len) ((size_t)hashlittle((key), len, hashtable_seed)) + +static JSON_INLINE void list_init(list_t *list) { + list->next = list; + list->prev = list; +} + +static JSON_INLINE void list_insert(list_t *list, list_t *node) { + node->next = list; + node->prev = list->prev; + list->prev->next = node; + list->prev = node; +} + +static JSON_INLINE void list_remove(list_t *list) { + list->prev->next = list->next; + list->next->prev = list->prev; +} + +static JSON_INLINE int bucket_is_empty(hashtable_t *hashtable, bucket_t *bucket) { + return bucket->first == &hashtable->list && bucket->first == bucket->last; +} + +static void insert_to_bucket(hashtable_t *hashtable, bucket_t *bucket, list_t *list) { + if (bucket_is_empty(hashtable, bucket)) { + list_insert(&hashtable->list, list); + bucket->first = bucket->last = list; + } else { + list_insert(bucket->first, list); + bucket->first = list; + } +} + +static pair_t *hashtable_find_pair(hashtable_t *hashtable, bucket_t *bucket, + const char *key, size_t key_len, size_t hash) { + list_t *list; + pair_t *pair; + + if (bucket_is_empty(hashtable, bucket)) + return NULL; + + list = bucket->first; + while (1) { + pair = list_to_pair(list); + if (pair->hash == hash && pair->key_len == key_len && + memcmp(pair->key, key, key_len) == 0) + return pair; + 
+ if (list == bucket->last) + break; + + list = list->next; + } + + return NULL; +} + +/* returns 0 on success, -1 if key was not found */ +static int hashtable_do_del(hashtable_t *hashtable, const char *key, size_t key_len, + size_t hash) { + pair_t *pair; + bucket_t *bucket; + size_t index; + + index = hash & hashmask(hashtable->order); + bucket = &hashtable->buckets[index]; + + pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); + if (!pair) + return -1; + + if (&pair->list == bucket->first && &pair->list == bucket->last) + bucket->first = bucket->last = &hashtable->list; + + else if (&pair->list == bucket->first) + bucket->first = pair->list.next; + + else if (&pair->list == bucket->last) + bucket->last = pair->list.prev; + + list_remove(&pair->list); + list_remove(&pair->ordered_list); + json_decref(pair->value); + + jsonp_free(pair); + hashtable->size--; + + return 0; +} + +static void hashtable_do_clear(hashtable_t *hashtable) { + list_t *list, *next; + pair_t *pair; + + for (list = hashtable->list.next; list != &hashtable->list; list = next) { + next = list->next; + pair = list_to_pair(list); + json_decref(pair->value); + jsonp_free(pair); + } +} + +static int hashtable_do_rehash(hashtable_t *hashtable) { + list_t *list, *next; + pair_t *pair; + size_t i, index, new_size, new_order; + struct hashtable_bucket *new_buckets; + + new_order = hashtable->order + 1; + new_size = hashsize(new_order); + + new_buckets = jsonp_malloc(new_size * sizeof(bucket_t)); + if (!new_buckets) + return -1; + + jsonp_free(hashtable->buckets); + hashtable->buckets = new_buckets; + hashtable->order = new_order; + + for (i = 0; i < hashsize(hashtable->order); i++) { + hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; + } + + list = hashtable->list.next; + list_init(&hashtable->list); + + for (; list != &hashtable->list; list = next) { + next = list->next; + pair = list_to_pair(list); + index = pair->hash % new_size; + 
insert_to_bucket(hashtable, &hashtable->buckets[index], &pair->list); + } + + return 0; +} + +int hashtable_init(hashtable_t *hashtable) { + size_t i; + + hashtable->size = 0; + hashtable->order = INITIAL_HASHTABLE_ORDER; + hashtable->buckets = jsonp_malloc(hashsize(hashtable->order) * sizeof(bucket_t)); + if (!hashtable->buckets) + return -1; + + list_init(&hashtable->list); + list_init(&hashtable->ordered_list); + + for (i = 0; i < hashsize(hashtable->order); i++) { + hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; + } + + return 0; +} + +void hashtable_close(hashtable_t *hashtable) { + hashtable_do_clear(hashtable); + jsonp_free(hashtable->buckets); +} + +static pair_t *init_pair(json_t *value, const char *key, size_t key_len, size_t hash) { + pair_t *pair; + + /* offsetof(...) returns the size of pair_t without the last, + flexible member. This way, the correct amount is + allocated. */ + + if (key_len >= (size_t)-1 - offsetof(pair_t, key)) { + /* Avoid an overflow if the key is very long */ + return NULL; + } + + pair = jsonp_malloc(offsetof(pair_t, key) + key_len + 1); + + if (!pair) + return NULL; + + pair->hash = hash; + memcpy(pair->key, key, key_len); + pair->key[key_len] = '\0'; + pair->key_len = key_len; + pair->value = value; + + list_init(&pair->list); + list_init(&pair->ordered_list); + + return pair; +} + +int hashtable_set(hashtable_t *hashtable, const char *key, size_t key_len, + json_t *value) { + pair_t *pair; + bucket_t *bucket; + size_t hash, index; + + /* rehash if the load ratio exceeds 1 */ + if (hashtable->size >= hashsize(hashtable->order)) + if (hashtable_do_rehash(hashtable)) + return -1; + + hash = hash_str(key, key_len); + index = hash & hashmask(hashtable->order); + bucket = &hashtable->buckets[index]; + pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); + + if (pair) { + json_decref(pair->value); + pair->value = value; + } else { + pair = init_pair(value, key, key_len, hash); + + if 
(!pair) + return -1; + + insert_to_bucket(hashtable, bucket, &pair->list); + list_insert(&hashtable->ordered_list, &pair->ordered_list); + + hashtable->size++; + } + return 0; +} + +void *hashtable_get(hashtable_t *hashtable, const char *key, size_t key_len) { + pair_t *pair; + size_t hash; + bucket_t *bucket; + + hash = hash_str(key, key_len); + bucket = &hashtable->buckets[hash & hashmask(hashtable->order)]; + + pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); + if (!pair) + return NULL; + + return pair->value; +} + +int hashtable_del(hashtable_t *hashtable, const char *key, size_t key_len) { + size_t hash = hash_str(key, key_len); + return hashtable_do_del(hashtable, key, key_len, hash); +} + +void hashtable_clear(hashtable_t *hashtable) { + size_t i; + + hashtable_do_clear(hashtable); + + for (i = 0; i < hashsize(hashtable->order); i++) { + hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; + } + + list_init(&hashtable->list); + list_init(&hashtable->ordered_list); + hashtable->size = 0; +} + +void *hashtable_iter(hashtable_t *hashtable) { + return hashtable_iter_next(hashtable, &hashtable->ordered_list); +} + +void *hashtable_iter_at(hashtable_t *hashtable, const char *key, size_t key_len) { + pair_t *pair; + size_t hash; + bucket_t *bucket; + + hash = hash_str(key, key_len); + bucket = &hashtable->buckets[hash & hashmask(hashtable->order)]; + + pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); + if (!pair) + return NULL; + + return &pair->ordered_list; +} + +void *hashtable_iter_next(hashtable_t *hashtable, void *iter) { + list_t *list = (list_t *)iter; + if (list->next == &hashtable->ordered_list) + return NULL; + return list->next; +} + +void *hashtable_iter_key(void *iter) { + pair_t *pair = ordered_list_to_pair((list_t *)iter); + return pair->key; +} + +size_t hashtable_iter_key_len(void *iter) { + pair_t *pair = ordered_list_to_pair((list_t *)iter); + return pair->key_len; +} + +void 
*hashtable_iter_value(void *iter) { + pair_t *pair = ordered_list_to_pair((list_t *)iter); + return pair->value; +} + +void hashtable_iter_set(void *iter, json_t *value) { + pair_t *pair = ordered_list_to_pair((list_t *)iter); + + json_decref(pair->value); + pair->value = value; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable.h b/solo-ckpool-source/src/jansson-2.14/src/hashtable.h new file mode 100644 index 0000000..03a1f5a --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/hashtable.h @@ -0,0 +1,186 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef HASHTABLE_H +#define HASHTABLE_H + +#include "jansson.h" +#include + +struct hashtable_list { + struct hashtable_list *prev; + struct hashtable_list *next; +}; + +/* "pair" may be a bit confusing a name, but think of it as a + key-value pair. In this case, it just encodes some extra data, + too */ +struct hashtable_pair { + struct hashtable_list list; + struct hashtable_list ordered_list; + size_t hash; + json_t *value; + size_t key_len; + char key[1]; +}; + +struct hashtable_bucket { + struct hashtable_list *first; + struct hashtable_list *last; +}; + +typedef struct hashtable { + size_t size; + struct hashtable_bucket *buckets; + size_t order; /* hashtable has pow(2, order) buckets */ + struct hashtable_list list; + struct hashtable_list ordered_list; +} hashtable_t; + +#define hashtable_key_to_iter(key_) \ + (&(container_of(key_, struct hashtable_pair, key)->ordered_list)) + +/** + * hashtable_init - Initialize a hashtable object + * + * @hashtable: The (statically allocated) hashtable object + * + * Initializes a statically allocated hashtable object. The object + * should be cleared with hashtable_close when it's no longer used. + * + * Returns 0 on success, -1 on error (out of memory). 
+ */ +int hashtable_init(hashtable_t *hashtable) JANSSON_ATTRS((warn_unused_result)); + +/** + * hashtable_close - Release all resources used by a hashtable object + * + * @hashtable: The hashtable + * + * Destroys a statically allocated hashtable object. + */ +void hashtable_close(hashtable_t *hashtable); + +/** + * hashtable_set - Add/modify value in hashtable + * + * @hashtable: The hashtable object + * @key: The key + * @key: The length of key + * @serial: For addition order of keys + * @value: The value + * + * If a value with the given key already exists, its value is replaced + * with the new value. Value is "stealed" in the sense that hashtable + * doesn't increment its refcount but decreases the refcount when the + * value is no longer needed. + * + * Returns 0 on success, -1 on failure (out of memory). + */ +int hashtable_set(hashtable_t *hashtable, const char *key, size_t key_len, json_t *value); + +/** + * hashtable_get - Get a value associated with a key + * + * @hashtable: The hashtable object + * @key: The key + * @key: The length of key + * + * Returns value if it is found, or NULL otherwise. + */ +void *hashtable_get(hashtable_t *hashtable, const char *key, size_t key_len); + +/** + * hashtable_del - Remove a value from the hashtable + * + * @hashtable: The hashtable object + * @key: The key + * @key: The length of key + * + * Returns 0 on success, or -1 if the key was not found. + */ +int hashtable_del(hashtable_t *hashtable, const char *key, size_t key_len); + +/** + * hashtable_clear - Clear hashtable + * + * @hashtable: The hashtable object + * + * Removes all items from the hashtable. + */ +void hashtable_clear(hashtable_t *hashtable); + +/** + * hashtable_iter - Iterate over hashtable + * + * @hashtable: The hashtable object + * + * Returns an opaque iterator to the first element in the hashtable. + * The iterator should be passed to hashtable_iter_* functions. + * The hashtable items are not iterated over in any particular order. 
+ * + * There's no need to free the iterator in any way. The iterator is + * valid as long as the item that is referenced by the iterator is not + * deleted. Other values may be added or deleted. In particular, + * hashtable_iter_next() may be called on an iterator, and after that + * the key/value pair pointed by the old iterator may be deleted. + */ +void *hashtable_iter(hashtable_t *hashtable); + +/** + * hashtable_iter_at - Return an iterator at a specific key + * + * @hashtable: The hashtable object + * @key: The key that the iterator should point to + * @key: The length of key + * + * Like hashtable_iter() but returns an iterator pointing to a + * specific key. + */ +void *hashtable_iter_at(hashtable_t *hashtable, const char *key, size_t key_len); + +/** + * hashtable_iter_next - Advance an iterator + * + * @hashtable: The hashtable object + * @iter: The iterator + * + * Returns a new iterator pointing to the next element in the + * hashtable or NULL if the whole hastable has been iterated over. 
+ */ +void *hashtable_iter_next(hashtable_t *hashtable, void *iter); + +/** + * hashtable_iter_key - Retrieve the key pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_key(void *iter); + +/** + * hashtable_iter_key_len - Retrieve the key length pointed by an iterator + * + * @iter: The iterator + */ +size_t hashtable_iter_key_len(void *iter); + +/** + * hashtable_iter_value - Retrieve the value pointed by an iterator + * + * @iter: The iterator + */ +void *hashtable_iter_value(void *iter); + +/** + * hashtable_iter_set - Set the value pointed by an iterator + * + * @iter: The iterator + * @value: The value to set + */ +void hashtable_iter_set(void *iter, json_t *value); + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c b/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c new file mode 100644 index 0000000..d156b40 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c @@ -0,0 +1,277 @@ +/* Generate sizeof(uint32_t) bytes of as random data as possible to seed + the hash function. +*/ + +#ifdef HAVE_CONFIG_H +#include +#endif + +#include +#include + +#ifdef HAVE_STDINT_H +#include +#endif + +#ifdef HAVE_FCNTL_H +#include +#endif + +#ifdef HAVE_SCHED_H +#include +#endif + +#ifdef HAVE_UNISTD_H +#include +#endif + +#ifdef HAVE_SYS_STAT_H +#include +#endif + +#ifdef HAVE_SYS_TIME_H +#include +#endif + +#ifdef HAVE_SYS_TYPES_H +#include +#endif + +#if defined(_WIN32) +/* For GetModuleHandle(), GetProcAddress() and GetCurrentProcessId() */ +#include +#endif + +#include "jansson.h" + +static uint32_t buf_to_uint32(char *data) { + size_t i; + uint32_t result = 0; + + for (i = 0; i < sizeof(uint32_t); i++) + result = (result << 8) | (unsigned char)data[i]; + + return result; +} + +/* /dev/urandom */ +#if !defined(_WIN32) && defined(USE_URANDOM) +static int seed_from_urandom(uint32_t *seed) { + /* Use unbuffered I/O if we have open(), close() and read(). 
Otherwise + fall back to fopen() */ + + char data[sizeof(uint32_t)]; + int ok; + +#if defined(HAVE_OPEN) && defined(HAVE_CLOSE) && defined(HAVE_READ) + int urandom; + urandom = open("/dev/urandom", O_RDONLY); + if (urandom == -1) + return 1; + + ok = read(urandom, data, sizeof(uint32_t)) == sizeof(uint32_t); + close(urandom); +#else + FILE *urandom; + + urandom = fopen("/dev/urandom", "rb"); + if (!urandom) + return 1; + + ok = fread(data, 1, sizeof(uint32_t), urandom) == sizeof(uint32_t); + fclose(urandom); +#endif + + if (!ok) + return 1; + + *seed = buf_to_uint32(data); + return 0; +} +#endif + +/* Windows Crypto API */ +#if defined(_WIN32) && defined(USE_WINDOWS_CRYPTOAPI) +#include + +typedef BOOL(WINAPI *CRYPTACQUIRECONTEXTA)(HCRYPTPROV *phProv, LPCSTR pszContainer, + LPCSTR pszProvider, DWORD dwProvType, + DWORD dwFlags); +typedef BOOL(WINAPI *CRYPTGENRANDOM)(HCRYPTPROV hProv, DWORD dwLen, BYTE *pbBuffer); +typedef BOOL(WINAPI *CRYPTRELEASECONTEXT)(HCRYPTPROV hProv, DWORD dwFlags); + +static int seed_from_windows_cryptoapi(uint32_t *seed) { + HINSTANCE hAdvAPI32 = NULL; + CRYPTACQUIRECONTEXTA pCryptAcquireContext = NULL; + CRYPTGENRANDOM pCryptGenRandom = NULL; + CRYPTRELEASECONTEXT pCryptReleaseContext = NULL; + HCRYPTPROV hCryptProv = 0; + BYTE data[sizeof(uint32_t)]; + int ok; + + hAdvAPI32 = GetModuleHandle(TEXT("advapi32.dll")); + if (hAdvAPI32 == NULL) + return 1; + + pCryptAcquireContext = + (CRYPTACQUIRECONTEXTA)GetProcAddress(hAdvAPI32, "CryptAcquireContextA"); + if (!pCryptAcquireContext) + return 1; + + pCryptGenRandom = (CRYPTGENRANDOM)GetProcAddress(hAdvAPI32, "CryptGenRandom"); + if (!pCryptGenRandom) + return 1; + + pCryptReleaseContext = + (CRYPTRELEASECONTEXT)GetProcAddress(hAdvAPI32, "CryptReleaseContext"); + if (!pCryptReleaseContext) + return 1; + + if (!pCryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, + CRYPT_VERIFYCONTEXT)) + return 1; + + ok = pCryptGenRandom(hCryptProv, sizeof(uint32_t), data); + 
pCryptReleaseContext(hCryptProv, 0); + + if (!ok) + return 1; + + *seed = buf_to_uint32((char *)data); + return 0; +} +#endif + +/* gettimeofday() and getpid() */ +static int seed_from_timestamp_and_pid(uint32_t *seed) { +#ifdef HAVE_GETTIMEOFDAY + /* XOR of seconds and microseconds */ + struct timeval tv; + gettimeofday(&tv, NULL); + *seed = (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec; +#else + /* Seconds only */ + *seed = (uint32_t)time(NULL); +#endif + + /* XOR with PID for more randomness */ +#if defined(_WIN32) + *seed ^= (uint32_t)GetCurrentProcessId(); +#elif defined(HAVE_GETPID) + *seed ^= (uint32_t)getpid(); +#endif + + return 0; +} + +static uint32_t generate_seed() { + uint32_t seed = 0; + int done = 0; + +#if !defined(_WIN32) && defined(USE_URANDOM) + if (seed_from_urandom(&seed) == 0) + done = 1; +#endif + +#if defined(_WIN32) && defined(USE_WINDOWS_CRYPTOAPI) + if (seed_from_windows_cryptoapi(&seed) == 0) + done = 1; +#endif + + if (!done) { + /* Fall back to timestamp and PID if no better randomness is + available */ + seed_from_timestamp_and_pid(&seed); + } + + /* Make sure the seed is never zero */ + if (seed == 0) + seed = 1; + + return seed; +} + +volatile uint32_t hashtable_seed = 0; + +#if defined(HAVE_ATOMIC_BUILTINS) && (defined(HAVE_SCHED_YIELD) || !defined(_WIN32)) +static volatile char seed_initialized = 0; + +void json_object_seed(size_t seed) { + uint32_t new_seed = (uint32_t)seed; + + if (hashtable_seed == 0) { + if (__atomic_test_and_set(&seed_initialized, __ATOMIC_RELAXED) == 0) { + /* Do the seeding ourselves */ + if (new_seed == 0) + new_seed = generate_seed(); + + __atomic_store_n(&hashtable_seed, new_seed, __ATOMIC_RELEASE); + } else { + /* Wait for another thread to do the seeding */ + do { +#ifdef HAVE_SCHED_YIELD + sched_yield(); +#endif + } while (__atomic_load_n(&hashtable_seed, __ATOMIC_ACQUIRE) == 0); + } + } +} +#elif defined(HAVE_SYNC_BUILTINS) && (defined(HAVE_SCHED_YIELD) || !defined(_WIN32)) +void 
json_object_seed(size_t seed) { + uint32_t new_seed = (uint32_t)seed; + + if (hashtable_seed == 0) { + if (new_seed == 0) { + /* Explicit synchronization fences are not supported by the + __sync builtins, so every thread getting here has to + generate the seed value. + */ + new_seed = generate_seed(); + } + + do { + if (__sync_bool_compare_and_swap(&hashtable_seed, 0, new_seed)) { + /* We were the first to seed */ + break; + } else { + /* Wait for another thread to do the seeding */ +#ifdef HAVE_SCHED_YIELD + sched_yield(); +#endif + } + } while (hashtable_seed == 0); + } +} +#elif defined(_WIN32) +static long seed_initialized = 0; +void json_object_seed(size_t seed) { + uint32_t new_seed = (uint32_t)seed; + + if (hashtable_seed == 0) { + if (InterlockedIncrement(&seed_initialized) == 1) { + /* Do the seeding ourselves */ + if (new_seed == 0) + new_seed = generate_seed(); + + hashtable_seed = new_seed; + } else { + /* Wait for another thread to do the seeding */ + do { + SwitchToThread(); + } while (hashtable_seed == 0); + } + } +} +#else +/* Fall back to a thread-unsafe version */ +void json_object_seed(size_t seed) { + uint32_t new_seed = (uint32_t)seed; + + if (hashtable_seed == 0) { + if (new_seed == 0) + new_seed = generate_seed(); + + hashtable_seed = new_seed; + } +} +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson.def b/solo-ckpool-source/src/jansson-2.14/src/jansson.def new file mode 100644 index 0000000..5c76c2f --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/jansson.def @@ -0,0 +1,83 @@ +EXPORTS + json_delete + json_true + json_false + json_null + json_sprintf + json_vsprintf + json_string + json_stringn + json_string_nocheck + json_stringn_nocheck + json_string_value + json_string_length + json_string_set + json_string_setn + json_string_set_nocheck + json_string_setn_nocheck + json_integer + json_integer_value + json_integer_set + json_real + json_real_value + json_real_set + json_number_value + json_array + 
json_array_size + json_array_get + json_array_set_new + json_array_append_new + json_array_insert_new + json_array_remove + json_array_clear + json_array_extend + json_object + json_object_size + json_object_get + json_object_getn + json_object_set_new + json_object_setn_new + json_object_set_new_nocheck + json_object_setn_new_nocheck + json_object_del + json_object_deln + json_object_clear + json_object_update + json_object_update_existing + json_object_update_missing + json_object_update_recursive + json_object_iter + json_object_iter_at + json_object_iter_next + json_object_iter_key + json_object_iter_key_len + json_object_iter_value + json_object_iter_set_new + json_object_key_to_iter + json_object_seed + json_dumps + json_dumpb + json_dumpf + json_dumpfd + json_dump_file + json_dump_callback + json_loads + json_loadb + json_loadf + json_loadfd + json_load_file + json_load_callback + json_equal + json_copy + json_deep_copy + json_pack + json_pack_ex + json_vpack_ex + json_unpack + json_unpack_ex + json_vunpack_ex + json_set_alloc_funcs + json_get_alloc_funcs + jansson_version_str + jansson_version_cmp + diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson.h b/solo-ckpool-source/src/jansson-2.14/src/jansson.h new file mode 100644 index 0000000..ddc3598 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/jansson.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef JANSSON_H +#define JANSSON_H + +#include +#include +#include /* for size_t */ + +#include "jansson_config.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* version */ + +#define JANSSON_MAJOR_VERSION 2 +#define JANSSON_MINOR_VERSION 14 +#define JANSSON_MICRO_VERSION 0 + +/* Micro version is omitted if it's 0 */ +#define JANSSON_VERSION "2.14" + +/* Version as a 3-byte hex number, e.g. 0x010201 == 1.2.1. Use this + for numeric comparisons, e.g. #if JANSSON_VERSION_HEX >= ... */ +#define JANSSON_VERSION_HEX \ + ((JANSSON_MAJOR_VERSION << 16) | (JANSSON_MINOR_VERSION << 8) | \ + (JANSSON_MICRO_VERSION << 0)) + +/* If __atomic or __sync builtins are available the library is thread + * safe for all read-only functions plus reference counting. */ +#if JSON_HAVE_ATOMIC_BUILTINS || JSON_HAVE_SYNC_BUILTINS +#define JANSSON_THREAD_SAFE_REFCOUNT 1 +#endif + +#if defined(__GNUC__) || defined(__clang__) +#define JANSSON_ATTRS(x) __attribute__(x) +#else +#define JANSSON_ATTRS(x) +#endif + +/* types */ + +typedef enum { + JSON_OBJECT, + JSON_ARRAY, + JSON_STRING, + JSON_INTEGER, + JSON_REAL, + JSON_TRUE, + JSON_FALSE, + JSON_NULL +} json_type; + +typedef struct json_t { + json_type type; + volatile size_t refcount; +} json_t; + +#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ +#if JSON_INTEGER_IS_LONG_LONG +#ifdef _WIN32 +#define JSON_INTEGER_FORMAT "I64d" +#else +#define JSON_INTEGER_FORMAT "lld" +#endif +typedef long long json_int_t; +#else +#define JSON_INTEGER_FORMAT "ld" +typedef long json_int_t; +#endif /* JSON_INTEGER_IS_LONG_LONG */ +#endif + +#define json_typeof(json) ((json)->type) +#define json_is_object(json) ((json) && json_typeof(json) == JSON_OBJECT) +#define json_is_array(json) ((json) && json_typeof(json) == JSON_ARRAY) +#define json_is_string(json) ((json) && json_typeof(json) == JSON_STRING) +#define json_is_integer(json) ((json) && json_typeof(json) == JSON_INTEGER) +#define json_is_real(json) ((json) && json_typeof(json) == 
JSON_REAL) +#define json_is_number(json) (json_is_integer(json) || json_is_real(json)) +#define json_is_true(json) ((json) && json_typeof(json) == JSON_TRUE) +#define json_is_false(json) ((json) && json_typeof(json) == JSON_FALSE) +#define json_boolean_value json_is_true +#define json_is_boolean(json) (json_is_true(json) || json_is_false(json)) +#define json_is_null(json) ((json) && json_typeof(json) == JSON_NULL) + +/* construction, destruction, reference counting */ + +json_t *json_object(void); +json_t *json_array(void); +json_t *json_string(const char *value); +json_t *json_stringn(const char *value, size_t len); +json_t *json_string_nocheck(const char *value); +json_t *json_stringn_nocheck(const char *value, size_t len); +json_t *json_integer(json_int_t value); +json_t *json_real(double value); +json_t *json_true(void); +json_t *json_false(void); +#define json_boolean(val) ((val) ? json_true() : json_false()) +json_t *json_null(void); + +/* do not call JSON_INTERNAL_INCREF or JSON_INTERNAL_DECREF directly */ +#if JSON_HAVE_ATOMIC_BUILTINS +#define JSON_INTERNAL_INCREF(json) \ + __atomic_add_fetch(&json->refcount, 1, __ATOMIC_ACQUIRE) +#define JSON_INTERNAL_DECREF(json) \ + __atomic_sub_fetch(&json->refcount, 1, __ATOMIC_RELEASE) +#elif JSON_HAVE_SYNC_BUILTINS +#define JSON_INTERNAL_INCREF(json) __sync_add_and_fetch(&json->refcount, 1) +#define JSON_INTERNAL_DECREF(json) __sync_sub_and_fetch(&json->refcount, 1) +#else +#define JSON_INTERNAL_INCREF(json) (++json->refcount) +#define JSON_INTERNAL_DECREF(json) (--json->refcount) +#endif + +static JSON_INLINE json_t *json_incref(json_t *json) { + if (json && json->refcount != (size_t)-1) + JSON_INTERNAL_INCREF(json); + return json; +} + +/* do not call json_delete directly */ +void json_delete(json_t *json); + +static JSON_INLINE void json_decref(json_t *json) { + if (json && json->refcount != (size_t)-1 && JSON_INTERNAL_DECREF(json) == 0) + json_delete(json); +} + +#if defined(__GNUC__) || defined(__clang__) 
+static JSON_INLINE void json_decrefp(json_t **json) { + if (json) { + json_decref(*json); + *json = NULL; + } +} + +#define json_auto_t json_t __attribute__((cleanup(json_decrefp))) +#endif + +/* error reporting */ + +#define JSON_ERROR_TEXT_LENGTH 160 +#define JSON_ERROR_SOURCE_LENGTH 80 + +typedef struct json_error_t { + int line; + int column; + int position; + char source[JSON_ERROR_SOURCE_LENGTH]; + char text[JSON_ERROR_TEXT_LENGTH]; +} json_error_t; + +enum json_error_code { + json_error_unknown, + json_error_out_of_memory, + json_error_stack_overflow, + json_error_cannot_open_file, + json_error_invalid_argument, + json_error_invalid_utf8, + json_error_premature_end_of_input, + json_error_end_of_input_expected, + json_error_invalid_syntax, + json_error_invalid_format, + json_error_wrong_type, + json_error_null_character, + json_error_null_value, + json_error_null_byte_in_key, + json_error_duplicate_key, + json_error_numeric_overflow, + json_error_item_not_found, + json_error_index_out_of_range +}; + +static JSON_INLINE enum json_error_code json_error_code(const json_error_t *e) { + return (enum json_error_code)e->text[JSON_ERROR_TEXT_LENGTH - 1]; +} + +/* getters, setters, manipulation */ + +void json_object_seed(size_t seed); +size_t json_object_size(const json_t *object); +json_t *json_object_get(const json_t *object, const char *key) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_object_getn(const json_t *object, const char *key, size_t key_len) + JANSSON_ATTRS((warn_unused_result)); +int json_object_set_new(json_t *object, const char *key, json_t *value); +int json_object_setn_new(json_t *object, const char *key, size_t key_len, json_t *value); +int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value); +int json_object_setn_new_nocheck(json_t *object, const char *key, size_t key_len, + json_t *value); +int json_object_del(json_t *object, const char *key); +int json_object_deln(json_t *object, const char *key, size_t key_len); 
+int json_object_clear(json_t *object); +int json_object_update(json_t *object, json_t *other); +int json_object_update_existing(json_t *object, json_t *other); +int json_object_update_missing(json_t *object, json_t *other); +int json_object_update_recursive(json_t *object, json_t *other); +void *json_object_iter(json_t *object); +void *json_object_iter_at(json_t *object, const char *key); +void *json_object_key_to_iter(const char *key); +void *json_object_iter_next(json_t *object, void *iter); +const char *json_object_iter_key(void *iter); +size_t json_object_iter_key_len(void *iter); +json_t *json_object_iter_value(void *iter); +int json_object_iter_set_new(json_t *object, void *iter, json_t *value); + +#define json_object_foreach(object, key, value) \ + for (key = json_object_iter_key(json_object_iter(object)); \ + key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ + key = json_object_iter_key( \ + json_object_iter_next(object, json_object_key_to_iter(key)))) + +#define json_object_keylen_foreach(object, key, key_len, value) \ + for (key = json_object_iter_key(json_object_iter(object)), \ + key_len = json_object_iter_key_len(json_object_key_to_iter(key)); \ + key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ + key = json_object_iter_key( \ + json_object_iter_next(object, json_object_key_to_iter(key))), \ + key_len = json_object_iter_key_len(json_object_key_to_iter(key))) + +#define json_object_foreach_safe(object, n, key, value) \ + for (key = json_object_iter_key(json_object_iter(object)), \ + n = json_object_iter_next(object, json_object_key_to_iter(key)); \ + key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ + key = json_object_iter_key(n), \ + n = json_object_iter_next(object, json_object_key_to_iter(key))) + +#define json_object_keylen_foreach_safe(object, n, key, key_len, value) \ + for (key = json_object_iter_key(json_object_iter(object)), \ + n = json_object_iter_next(object, 
json_object_key_to_iter(key)), \ + key_len = json_object_iter_key_len(json_object_key_to_iter(key)); \ + key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ + key = json_object_iter_key(n), key_len = json_object_iter_key_len(n), \ + n = json_object_iter_next(object, json_object_key_to_iter(key))) + +#define json_array_foreach(array, index, value) \ + for (index = 0; \ + index < json_array_size(array) && (value = json_array_get(array, index)); \ + index++) + +static JSON_INLINE int json_object_set(json_t *object, const char *key, json_t *value) { + return json_object_set_new(object, key, json_incref(value)); +} + +static JSON_INLINE int json_object_setn(json_t *object, const char *key, size_t key_len, + json_t *value) { + return json_object_setn_new(object, key, key_len, json_incref(value)); +} + +static JSON_INLINE int json_object_set_nocheck(json_t *object, const char *key, + json_t *value) { + return json_object_set_new_nocheck(object, key, json_incref(value)); +} + +static JSON_INLINE int json_object_setn_nocheck(json_t *object, const char *key, + size_t key_len, json_t *value) { + return json_object_setn_new_nocheck(object, key, key_len, json_incref(value)); +} + +static JSON_INLINE int json_object_iter_set(json_t *object, void *iter, json_t *value) { + return json_object_iter_set_new(object, iter, json_incref(value)); +} + +static JSON_INLINE int json_object_update_new(json_t *object, json_t *other) { + int ret = json_object_update(object, other); + json_decref(other); + return ret; +} + +static JSON_INLINE int json_object_update_existing_new(json_t *object, json_t *other) { + int ret = json_object_update_existing(object, other); + json_decref(other); + return ret; +} + +static JSON_INLINE int json_object_update_missing_new(json_t *object, json_t *other) { + int ret = json_object_update_missing(object, other); + json_decref(other); + return ret; +} + +size_t json_array_size(const json_t *array); +json_t *json_array_get(const json_t *array, 
size_t index) + JANSSON_ATTRS((warn_unused_result)); +int json_array_set_new(json_t *array, size_t index, json_t *value); +int json_array_append_new(json_t *array, json_t *value); +int json_array_insert_new(json_t *array, size_t index, json_t *value); +int json_array_remove(json_t *array, size_t index); +int json_array_clear(json_t *array); +int json_array_extend(json_t *array, json_t *other); + +static JSON_INLINE int json_array_set(json_t *array, size_t ind, json_t *value) { + return json_array_set_new(array, ind, json_incref(value)); +} + +static JSON_INLINE int json_array_append(json_t *array, json_t *value) { + return json_array_append_new(array, json_incref(value)); +} + +static JSON_INLINE int json_array_insert(json_t *array, size_t ind, json_t *value) { + return json_array_insert_new(array, ind, json_incref(value)); +} + +const char *json_string_value(const json_t *string); +size_t json_string_length(const json_t *string); +json_int_t json_integer_value(const json_t *integer); +double json_real_value(const json_t *real); +double json_number_value(const json_t *json); + +int json_string_set(json_t *string, const char *value); +int json_string_setn(json_t *string, const char *value, size_t len); +int json_string_set_nocheck(json_t *string, const char *value); +int json_string_setn_nocheck(json_t *string, const char *value, size_t len); +int json_integer_set(json_t *integer, json_int_t value); +int json_real_set(json_t *real, double value); + +/* pack, unpack */ + +json_t *json_pack(const char *fmt, ...) JANSSON_ATTRS((warn_unused_result)); +json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) 
+ JANSSON_ATTRS((warn_unused_result)); +json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) + JANSSON_ATTRS((warn_unused_result)); + +#define JSON_VALIDATE_ONLY 0x1 +#define JSON_STRICT 0x2 + +int json_unpack(json_t *root, const char *fmt, ...); +int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...); +int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, + va_list ap); + +/* sprintf */ + +json_t *json_sprintf(const char *fmt, ...) + JANSSON_ATTRS((warn_unused_result, format(printf, 1, 2))); +json_t *json_vsprintf(const char *fmt, va_list ap) + JANSSON_ATTRS((warn_unused_result, format(printf, 1, 0))); + +/* equality */ + +int json_equal(const json_t *value1, const json_t *value2); + +/* copying */ + +json_t *json_copy(json_t *value) JANSSON_ATTRS((warn_unused_result)); +json_t *json_deep_copy(const json_t *value) JANSSON_ATTRS((warn_unused_result)); + +/* decoding */ + +#define JSON_REJECT_DUPLICATES 0x1 +#define JSON_DISABLE_EOF_CHECK 0x2 +#define JSON_DECODE_ANY 0x4 +#define JSON_DECODE_INT_AS_REAL 0x8 +#define JSON_ALLOW_NUL 0x10 + +typedef size_t (*json_load_callback_t)(void *buffer, size_t buflen, void *data); + +json_t *json_loads(const char *input, size_t flags, json_error_t *error) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_loadfd(int input, size_t flags, json_error_t *error) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_load_file(const char *path, size_t flags, json_error_t *error) + JANSSON_ATTRS((warn_unused_result)); +json_t *json_load_callback(json_load_callback_t callback, void *data, size_t flags, + json_error_t *error) JANSSON_ATTRS((warn_unused_result)); + +/* encoding */ + +#define JSON_MAX_INDENT 
0x1F +#define JSON_INDENT(n) ((n)&JSON_MAX_INDENT) +#define JSON_COMPACT 0x20 +#define JSON_ENSURE_ASCII 0x40 +#define JSON_SORT_KEYS 0x80 +#define JSON_PRESERVE_ORDER 0x100 +#define JSON_ENCODE_ANY 0x200 +#define JSON_ESCAPE_SLASH 0x400 +#define JSON_REAL_PRECISION(n) (((n)&0x1F) << 11) +#define JSON_EMBED 0x10000 +#define JSON_NO_UTF8 0x20000 +#define JSON_EOL 0x40000 + +typedef int (*json_dump_callback_t)(const char *buffer, size_t size, void *data); + +char *json_dumps(const json_t *json, size_t flags) JANSSON_ATTRS((warn_unused_result)); +size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags); +int json_dumpf(const json_t *json, FILE *output, size_t flags); +int json_dumpfd(const json_t *json, int output, size_t flags); +int json_dump_file(const json_t *json, const char *path, size_t flags); +int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, + size_t flags); + +/* custom memory allocation */ + +typedef void *(*json_malloc_t)(size_t); +typedef void (*json_free_t)(void *); + +void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn); +void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn); + +/* runtime version checking */ + +const char *jansson_version_str(void); +int jansson_version_cmp(int major, int minor, int micro); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in b/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in new file mode 100644 index 0000000..fe692ab --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2010-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + * + * + * This file specifies a part of the site-specific configuration for + * Jansson, namely those things that affect the public API in + * jansson.h. 
+ * + * The configure script copies this file to jansson_config.h and + * replaces @var@ substitutions by values that fit your system. If you + * cannot run the configure script, you can do the value substitution + * by hand. + */ + +#ifndef JANSSON_CONFIG_H +#define JANSSON_CONFIG_H + +/* If your compiler supports the inline keyword in C, JSON_INLINE is + defined to `inline', otherwise empty. In C++, the inline is always + supported. */ +#ifdef __cplusplus +#define JSON_INLINE inline +#else +#define JSON_INLINE @json_inline@ +#endif + +/* If your compiler supports the `long long` type and the strtoll() + library function, JSON_INTEGER_IS_LONG_LONG is defined to 1, + otherwise to 0. */ +#define JSON_INTEGER_IS_LONG_LONG @json_have_long_long@ + +/* If locale.h and localeconv() are available, define to 1, + otherwise to 0. */ +#define JSON_HAVE_LOCALECONV @json_have_localeconv@ + +/* If __atomic builtins are available they will be used to manage + reference counts of json_t. */ +#define JSON_HAVE_ATOMIC_BUILTINS @json_have_atomic_builtins@ + +/* If __atomic builtins are not available we try using __sync builtins + to manage reference counts of json_t. */ +#define JSON_HAVE_SYNC_BUILTINS @json_have_sync_builtins@ + +/* Maximum recursion depth for parsing JSON input. + This limits the depth of e.g. array-within-array constructions. */ +#define JSON_PARSER_MAX_DEPTH 2048 + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h b/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h new file mode 100644 index 0000000..cccbbf5 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef JANSSON_PRIVATE_H +#define JANSSON_PRIVATE_H + +#include "hashtable.h" +#include "jansson.h" +#include "jansson_private_config.h" +#include "strbuffer.h" +#include + +#define container_of(ptr_, type_, member_) \ + ((type_ *)((char *)ptr_ - offsetof(type_, member_))) + +/* On some platforms, max() may already be defined */ +#ifndef max +#define max(a, b) ((a) > (b) ? (a) : (b)) +#endif + +/* va_copy is a C99 feature. In C89 implementations, it's sometimes + available as __va_copy. If not, memcpy() should do the trick. */ +#ifndef va_copy +#ifdef __va_copy +#define va_copy __va_copy +#else +#define va_copy(a, b) memcpy(&(a), &(b), sizeof(va_list)) +#endif +#endif + +typedef struct { + json_t json; + hashtable_t hashtable; +} json_object_t; + +typedef struct { + json_t json; + size_t size; + size_t entries; + json_t **table; +} json_array_t; + +typedef struct { + json_t json; + char *value; + size_t length; +} json_string_t; + +typedef struct { + json_t json; + double value; +} json_real_t; + +typedef struct { + json_t json; + json_int_t value; +} json_integer_t; + +#define json_to_object(json_) container_of(json_, json_object_t, json) +#define json_to_array(json_) container_of(json_, json_array_t, json) +#define json_to_string(json_) container_of(json_, json_string_t, json) +#define json_to_real(json_) container_of(json_, json_real_t, json) +#define json_to_integer(json_) container_of(json_, json_integer_t, json) + +/* Create a string by taking ownership of an existing buffer */ +json_t *jsonp_stringn_nocheck_own(const char *value, size_t len); + +/* Error message formatting */ +void jsonp_error_init(json_error_t *error, const char *source); +void jsonp_error_set_source(json_error_t *error, const char *source); +void jsonp_error_set(json_error_t *error, int line, int column, size_t position, + enum json_error_code code, const char *msg, ...); +void jsonp_error_vset(json_error_t *error, int line, int column, size_t position, + enum json_error_code 
code, const char *msg, va_list ap); + +/* Locale independent string<->double conversions */ +int jsonp_strtod(strbuffer_t *strbuffer, double *out); +int jsonp_dtostr(char *buffer, size_t size, double value, int prec); + +/* Wrappers for custom memory functions */ +void *jsonp_malloc(size_t size) JANSSON_ATTRS((warn_unused_result)); +void _jsonp_free(void **ptr); +#define jsonp_free(ptr) _jsonp_free((void *)&(ptr)) + +char *jsonp_strndup(const char *str, size_t length) JANSSON_ATTRS((warn_unused_result)); +char *jsonp_strdup(const char *str) JANSSON_ATTRS((warn_unused_result)); +char *jsonp_strsteal(strbuffer_t *strbuff); +char *jsonp_eolstrsteal(strbuffer_t *strbuff); + +/* Circular reference check*/ +/* Space for "0x", double the sizeof a pointer for the hex and a terminator. */ +#define LOOP_KEY_LEN (2 + (sizeof(json_t *) * 2) + 1) +int jsonp_loop_check(hashtable_t *parents, const json_t *json, char *key, size_t key_size, + size_t *key_len_out); + +/* Windows compatibility */ +#if defined(_WIN32) || defined(WIN32) +#if defined(_MSC_VER) /* MS compiller */ +#if (_MSC_VER < 1900) && \ + !defined(snprintf) /* snprintf not defined yet & not introduced */ +#define snprintf _snprintf +#endif +#if (_MSC_VER < 1500) && \ + !defined(vsnprintf) /* vsnprintf not defined yet & not introduced */ +#define vsnprintf(b, c, f, a) _vsnprintf(b, c, f, a) +#endif +#else /* Other Windows compiller, old definition */ +#define snprintf _snprintf +#define vsnprintf _vsnprintf +#endif +#endif + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/load.c b/solo-ckpool-source/src/jansson-2.14/src/load.c new file mode 100644 index 0000000..8ae7abd --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/load.c @@ -0,0 +1,1106 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "jansson_private.h" + +#include +#include +#include +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#endif + +#include "jansson.h" +#include "strbuffer.h" +#include "utf.h" + +#define STREAM_STATE_OK 0 +#define STREAM_STATE_EOF -1 +#define STREAM_STATE_ERROR -2 + +#define TOKEN_INVALID -1 +#define TOKEN_EOF 0 +#define TOKEN_STRING 256 +#define TOKEN_INTEGER 257 +#define TOKEN_REAL 258 +#define TOKEN_TRUE 259 +#define TOKEN_FALSE 260 +#define TOKEN_NULL 261 + +/* Locale independent versions of isxxx() functions */ +#define l_isupper(c) ('A' <= (c) && (c) <= 'Z') +#define l_islower(c) ('a' <= (c) && (c) <= 'z') +#define l_isalpha(c) (l_isupper(c) || l_islower(c)) +#define l_isdigit(c) ('0' <= (c) && (c) <= '9') +#define l_isxdigit(c) \ + (l_isdigit(c) || ('A' <= (c) && (c) <= 'F') || ('a' <= (c) && (c) <= 'f')) + +/* Read one byte from stream, convert to unsigned char, then int, and + return. return EOF on end of file. This corresponds to the + behaviour of fgetc(). */ +typedef int (*get_func)(void *data); + +typedef struct { + get_func get; + void *data; + char buffer[5]; + size_t buffer_pos; + int state; + int line; + int column, last_column; + size_t position; +} stream_t; + +typedef struct { + stream_t stream; + strbuffer_t saved_text; + size_t flags; + size_t depth; + int token; + union { + struct { + char *val; + size_t len; + } string; + json_int_t integer; + double real; + } value; +} lex_t; + +#define stream_to_lex(stream) container_of(stream, lex_t, stream) + +/*** error reporting ***/ + +static void error_set(json_error_t *error, const lex_t *lex, enum json_error_code code, + const char *msg, ...) 
{ + va_list ap; + char msg_text[JSON_ERROR_TEXT_LENGTH]; + char msg_with_context[JSON_ERROR_TEXT_LENGTH]; + + int line = -1, col = -1; + size_t pos = 0; + const char *result = msg_text; + + if (!error) + return; + + va_start(ap, msg); + vsnprintf(msg_text, JSON_ERROR_TEXT_LENGTH, msg, ap); + msg_text[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + va_end(ap); + + if (lex) { + const char *saved_text = strbuffer_value(&lex->saved_text); + + line = lex->stream.line; + col = lex->stream.column; + pos = lex->stream.position; + + if (saved_text && saved_text[0]) { + if (lex->saved_text.length <= 20) { + snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, "%s near '%s'", + msg_text, saved_text); + msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + result = msg_with_context; + } + } else { + if (code == json_error_invalid_syntax) { + /* More specific error code for premature end of file. */ + code = json_error_premature_end_of_input; + } + if (lex->stream.state == STREAM_STATE_ERROR) { + /* No context for UTF-8 decoding errors */ + result = msg_text; + } else { + snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, "%s near end of file", + msg_text); + msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; + result = msg_with_context; + } + } + } + + jsonp_error_set(error, line, col, pos, code, "%s", result); +} + +/*** lexical analyzer ***/ + +static void stream_init(stream_t *stream, get_func get, void *data) { + stream->get = get; + stream->data = data; + stream->buffer[0] = '\0'; + stream->buffer_pos = 0; + + stream->state = STREAM_STATE_OK; + stream->line = 1; + stream->column = 0; + stream->position = 0; +} + +static int stream_get(stream_t *stream, json_error_t *error) { + int c; + + if (stream->state != STREAM_STATE_OK) + return stream->state; + + if (!stream->buffer[stream->buffer_pos]) { + c = stream->get(stream->data); + if (c == EOF) { + stream->state = STREAM_STATE_EOF; + return STREAM_STATE_EOF; + } + + stream->buffer[0] = c; + stream->buffer_pos = 0; + + if (0x80 <= c 
&& c <= 0xFF) { + /* multi-byte UTF-8 sequence */ + size_t i, count; + + count = utf8_check_first(c); + if (!count) + goto out; + + assert(count >= 2); + + for (i = 1; i < count; i++) + stream->buffer[i] = stream->get(stream->data); + + if (!utf8_check_full(stream->buffer, count, NULL)) + goto out; + + stream->buffer[count] = '\0'; + } else + stream->buffer[1] = '\0'; + } + + c = stream->buffer[stream->buffer_pos++]; + + stream->position++; + if (c == '\n') { + stream->line++; + stream->last_column = stream->column; + stream->column = 0; + } else if (utf8_check_first(c)) { + /* track the Unicode character column, so increment only if + this is the first character of a UTF-8 sequence */ + stream->column++; + } + + return c; + +out: + stream->state = STREAM_STATE_ERROR; + error_set(error, stream_to_lex(stream), json_error_invalid_utf8, + "unable to decode byte 0x%x", c); + return STREAM_STATE_ERROR; +} + +static void stream_unget(stream_t *stream, int c) { + if (c == STREAM_STATE_EOF || c == STREAM_STATE_ERROR) + return; + + stream->position--; + if (c == '\n') { + stream->line--; + stream->column = stream->last_column; + } else if (utf8_check_first(c)) + stream->column--; + + assert(stream->buffer_pos > 0); + stream->buffer_pos--; + assert(stream->buffer[stream->buffer_pos] == c); +} + +static int lex_get(lex_t *lex, json_error_t *error) { + return stream_get(&lex->stream, error); +} + +static void lex_save(lex_t *lex, int c) { strbuffer_append_byte(&lex->saved_text, c); } + +static int lex_get_save(lex_t *lex, json_error_t *error) { + int c = stream_get(&lex->stream, error); + if (c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) + lex_save(lex, c); + return c; +} + +static void lex_unget(lex_t *lex, int c) { stream_unget(&lex->stream, c); } + +static void lex_unget_unsave(lex_t *lex, int c) { + if (c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) { +/* Since we treat warnings as errors, when assertions are turned + * off the "d" variable would be set but never 
used. Which is + * treated as an error by GCC. + */ +#ifndef NDEBUG + char d; +#endif + stream_unget(&lex->stream, c); +#ifndef NDEBUG + d = +#endif + strbuffer_pop(&lex->saved_text); + assert(c == d); + } +} + +static void lex_save_cached(lex_t *lex) { + while (lex->stream.buffer[lex->stream.buffer_pos] != '\0') { + lex_save(lex, lex->stream.buffer[lex->stream.buffer_pos]); + lex->stream.buffer_pos++; + lex->stream.position++; + } +} + +static void lex_free_string(lex_t *lex) { + jsonp_free(lex->value.string.val); + lex->value.string.val = NULL; + lex->value.string.len = 0; +} + +/* assumes that str points to 'u' plus at least 4 valid hex digits */ +static int32_t decode_unicode_escape(const char *str) { + int i; + int32_t value = 0; + + assert(str[0] == 'u'); + + for (i = 1; i <= 4; i++) { + char c = str[i]; + value <<= 4; + if (l_isdigit(c)) + value += c - '0'; + else if (l_islower(c)) + value += c - 'a' + 10; + else if (l_isupper(c)) + value += c - 'A' + 10; + else + return -1; + } + + return value; +} + +static void lex_scan_string(lex_t *lex, json_error_t *error) { + int c; + const char *p; + char *t; + int i; + + lex->value.string.val = NULL; + lex->token = TOKEN_INVALID; + + c = lex_get_save(lex, error); + + while (c != '"') { + if (c == STREAM_STATE_ERROR) + goto out; + + else if (c == STREAM_STATE_EOF) { + error_set(error, lex, json_error_premature_end_of_input, + "premature end of input"); + goto out; + } + + else if (0 <= c && c <= 0x1F) { + /* control character */ + lex_unget_unsave(lex, c); + if (c == '\n') + error_set(error, lex, json_error_invalid_syntax, "unexpected newline"); + else + error_set(error, lex, json_error_invalid_syntax, "control character 0x%x", + c); + goto out; + } + + else if (c == '\\') { + c = lex_get_save(lex, error); + if (c == 'u') { + c = lex_get_save(lex, error); + for (i = 0; i < 4; i++) { + if (!l_isxdigit(c)) { + error_set(error, lex, json_error_invalid_syntax, + "invalid escape"); + goto out; + } + c = lex_get_save(lex, 
error); + } + } else if (c == '"' || c == '\\' || c == '/' || c == 'b' || c == 'f' || + c == 'n' || c == 'r' || c == 't') + c = lex_get_save(lex, error); + else { + error_set(error, lex, json_error_invalid_syntax, "invalid escape"); + goto out; + } + } else + c = lex_get_save(lex, error); + } + + /* the actual value is at most of the same length as the source + string, because: + - shortcut escapes (e.g. "\t") (length 2) are converted to 1 byte + - a single \uXXXX escape (length 6) is converted to at most 3 bytes + - two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair + are converted to 4 bytes + */ + t = jsonp_malloc(lex->saved_text.length + 1); + if (!t) { + /* this is not very nice, since TOKEN_INVALID is returned */ + goto out; + } + lex->value.string.val = t; + + /* + 1 to skip the " */ + p = strbuffer_value(&lex->saved_text) + 1; + + while (*p != '"') { + if (*p == '\\') { + p++; + if (*p == 'u') { + size_t length; + int32_t value; + + value = decode_unicode_escape(p); + if (value < 0) { + error_set(error, lex, json_error_invalid_syntax, + "invalid Unicode escape '%.6s'", p - 1); + goto out; + } + p += 5; + + if (0xD800 <= value && value <= 0xDBFF) { + /* surrogate pair */ + if (*p == '\\' && *(p + 1) == 'u') { + int32_t value2 = decode_unicode_escape(++p); + if (value2 < 0) { + error_set(error, lex, json_error_invalid_syntax, + "invalid Unicode escape '%.6s'", p - 1); + goto out; + } + p += 5; + + if (0xDC00 <= value2 && value2 <= 0xDFFF) { + /* valid second surrogate */ + value = + ((value - 0xD800) << 10) + (value2 - 0xDC00) + 0x10000; + } else { + /* invalid second surrogate */ + error_set(error, lex, json_error_invalid_syntax, + "invalid Unicode '\\u%04X\\u%04X'", value, value2); + goto out; + } + } else { + /* no second surrogate */ + error_set(error, lex, json_error_invalid_syntax, + "invalid Unicode '\\u%04X'", value); + goto out; + } + } else if (0xDC00 <= value && value <= 0xDFFF) { + error_set(error, lex, json_error_invalid_syntax, + 
"invalid Unicode '\\u%04X'", value); + goto out; + } + + if (utf8_encode(value, t, &length)) + assert(0); + t += length; + } else { + switch (*p) { + case '"': + case '\\': + case '/': + *t = *p; + break; + case 'b': + *t = '\b'; + break; + case 'f': + *t = '\f'; + break; + case 'n': + *t = '\n'; + break; + case 'r': + *t = '\r'; + break; + case 't': + *t = '\t'; + break; + default: + assert(0); + } + t++; + p++; + } + } else + *(t++) = *(p++); + } + *t = '\0'; + lex->value.string.len = t - lex->value.string.val; + lex->token = TOKEN_STRING; + return; + +out: + lex_free_string(lex); +} + +#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ +#if JSON_INTEGER_IS_LONG_LONG +#ifdef _MSC_VER /* Microsoft Visual Studio */ +#define json_strtoint _strtoi64 +#else +#define json_strtoint strtoll +#endif +#else +#define json_strtoint strtol +#endif +#endif + +static int lex_scan_number(lex_t *lex, int c, json_error_t *error) { + const char *saved_text; + char *end; + double doubleval; + + lex->token = TOKEN_INVALID; + + if (c == '-') + c = lex_get_save(lex, error); + + if (c == '0') { + c = lex_get_save(lex, error); + if (l_isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + } else if (l_isdigit(c)) { + do + c = lex_get_save(lex, error); + while (l_isdigit(c)); + } else { + lex_unget_unsave(lex, c); + goto out; + } + + if (!(lex->flags & JSON_DECODE_INT_AS_REAL) && c != '.' 
&& c != 'E' && c != 'e') { + json_int_t intval; + + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + + errno = 0; + intval = json_strtoint(saved_text, &end, 10); + if (errno == ERANGE) { + if (intval < 0) + error_set(error, lex, json_error_numeric_overflow, + "too big negative integer"); + else + error_set(error, lex, json_error_numeric_overflow, "too big integer"); + goto out; + } + + assert(end == saved_text + lex->saved_text.length); + + lex->token = TOKEN_INTEGER; + lex->value.integer = intval; + return 0; + } + + if (c == '.') { + c = lex_get(lex, error); + if (!l_isdigit(c)) { + lex_unget(lex, c); + goto out; + } + lex_save(lex, c); + + do + c = lex_get_save(lex, error); + while (l_isdigit(c)); + } + + if (c == 'E' || c == 'e') { + c = lex_get_save(lex, error); + if (c == '+' || c == '-') + c = lex_get_save(lex, error); + + if (!l_isdigit(c)) { + lex_unget_unsave(lex, c); + goto out; + } + + do + c = lex_get_save(lex, error); + while (l_isdigit(c)); + } + + lex_unget_unsave(lex, c); + + if (jsonp_strtod(&lex->saved_text, &doubleval)) { + error_set(error, lex, json_error_numeric_overflow, "real number overflow"); + goto out; + } + + lex->token = TOKEN_REAL; + lex->value.real = doubleval; + return 0; + +out: + return -1; +} + +static int lex_scan(lex_t *lex, json_error_t *error) { + int c; + + strbuffer_clear(&lex->saved_text); + + if (lex->token == TOKEN_STRING) + lex_free_string(lex); + + do + c = lex_get(lex, error); + while (c == ' ' || c == '\t' || c == '\n' || c == '\r'); + + if (c == STREAM_STATE_EOF) { + lex->token = TOKEN_EOF; + goto out; + } + + if (c == STREAM_STATE_ERROR) { + lex->token = TOKEN_INVALID; + goto out; + } + + lex_save(lex, c); + + if (c == '{' || c == '}' || c == '[' || c == ']' || c == ':' || c == ',') + lex->token = c; + + else if (c == '"') + lex_scan_string(lex, error); + + else if (l_isdigit(c) || c == '-') { + if (lex_scan_number(lex, c, error)) + goto out; + } + + else if (l_isalpha(c)) { + /* eat 
up the whole identifier for clearer error messages */ + const char *saved_text; + + do + c = lex_get_save(lex, error); + while (l_isalpha(c)); + lex_unget_unsave(lex, c); + + saved_text = strbuffer_value(&lex->saved_text); + + if (strcmp(saved_text, "true") == 0) + lex->token = TOKEN_TRUE; + else if (strcmp(saved_text, "false") == 0) + lex->token = TOKEN_FALSE; + else if (strcmp(saved_text, "null") == 0) + lex->token = TOKEN_NULL; + else + lex->token = TOKEN_INVALID; + } + + else { + /* save the rest of the input UTF-8 sequence to get an error + message of valid UTF-8 */ + lex_save_cached(lex); + lex->token = TOKEN_INVALID; + } + +out: + return lex->token; +} + +static char *lex_steal_string(lex_t *lex, size_t *out_len) { + char *result = NULL; + if (lex->token == TOKEN_STRING) { + result = lex->value.string.val; + *out_len = lex->value.string.len; + lex->value.string.val = NULL; + lex->value.string.len = 0; + } + return result; +} + +static int lex_init(lex_t *lex, get_func get, size_t flags, void *data) { + stream_init(&lex->stream, get, data); + if (strbuffer_init(&lex->saved_text)) + return -1; + + lex->flags = flags; + lex->token = TOKEN_INVALID; + return 0; +} + +static void lex_close(lex_t *lex) { + if (lex->token == TOKEN_STRING) + lex_free_string(lex); + strbuffer_close(&lex->saved_text); +} + +/*** parser ***/ + +static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error); + +static json_t *parse_object(lex_t *lex, size_t flags, json_error_t *error) { + json_t *object = json_object(); + if (!object) + return NULL; + + lex_scan(lex, error); + if (lex->token == '}') + return object; + + while (1) { + char *key; + size_t len; + json_t *value; + + if (lex->token != TOKEN_STRING) { + error_set(error, lex, json_error_invalid_syntax, "string or '}' expected"); + goto error; + } + + key = lex_steal_string(lex, &len); + if (!key) + return NULL; + if (memchr(key, '\0', len)) { + jsonp_free(key); + error_set(error, lex, json_error_null_byte_in_key, + 
"NUL byte in object key not supported"); + goto error; + } + + if (flags & JSON_REJECT_DUPLICATES) { + if (json_object_getn(object, key, len)) { + jsonp_free(key); + error_set(error, lex, json_error_duplicate_key, "duplicate object key"); + goto error; + } + } + + lex_scan(lex, error); + if (lex->token != ':') { + jsonp_free(key); + error_set(error, lex, json_error_invalid_syntax, "':' expected"); + goto error; + } + + lex_scan(lex, error); + value = parse_value(lex, flags, error); + if (!value) { + jsonp_free(key); + goto error; + } + + if (json_object_setn_new_nocheck(object, key, len, value)) { + jsonp_free(key); + goto error; + } + + jsonp_free(key); + + lex_scan(lex, error); + if (lex->token != ',') + break; + + lex_scan(lex, error); + } + + if (lex->token != '}') { + error_set(error, lex, json_error_invalid_syntax, "'}' expected"); + goto error; + } + + return object; + +error: + json_decref(object); + return NULL; +} + +static json_t *parse_array(lex_t *lex, size_t flags, json_error_t *error) { + json_t *array = json_array(); + if (!array) + return NULL; + + lex_scan(lex, error); + if (lex->token == ']') + return array; + + while (lex->token) { + json_t *elem = parse_value(lex, flags, error); + if (!elem) + goto error; + + if (json_array_append_new(array, elem)) { + goto error; + } + + lex_scan(lex, error); + if (lex->token != ',') + break; + + lex_scan(lex, error); + } + + if (lex->token != ']') { + error_set(error, lex, json_error_invalid_syntax, "']' expected"); + goto error; + } + + return array; + +error: + json_decref(array); + return NULL; +} + +static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error) { + json_t *json; + + lex->depth++; + if (lex->depth > JSON_PARSER_MAX_DEPTH) { + error_set(error, lex, json_error_stack_overflow, "maximum parsing depth reached"); + return NULL; + } + + switch (lex->token) { + case TOKEN_STRING: { + const char *value = lex->value.string.val; + size_t len = lex->value.string.len; + + if (!(flags & 
JSON_ALLOW_NUL)) { + if (memchr(value, '\0', len)) { + error_set(error, lex, json_error_null_character, + "\\u0000 is not allowed without JSON_ALLOW_NUL"); + return NULL; + } + } + + json = jsonp_stringn_nocheck_own(value, len); + lex->value.string.val = NULL; + lex->value.string.len = 0; + break; + } + + case TOKEN_INTEGER: { + json = json_integer(lex->value.integer); + break; + } + + case TOKEN_REAL: { + json = json_real(lex->value.real); + break; + } + + case TOKEN_TRUE: + json = json_true(); + break; + + case TOKEN_FALSE: + json = json_false(); + break; + + case TOKEN_NULL: + json = json_null(); + break; + + case '{': + json = parse_object(lex, flags, error); + break; + + case '[': + json = parse_array(lex, flags, error); + break; + + case TOKEN_INVALID: + error_set(error, lex, json_error_invalid_syntax, "invalid token"); + return NULL; + + default: + error_set(error, lex, json_error_invalid_syntax, "unexpected token"); + return NULL; + } + + if (!json) + return NULL; + + lex->depth--; + return json; +} + +static json_t *parse_json(lex_t *lex, size_t flags, json_error_t *error) { + json_t *result; + + lex->depth = 0; + + lex_scan(lex, error); + if (!(flags & JSON_DECODE_ANY)) { + if (lex->token != '[' && lex->token != '{') { + error_set(error, lex, json_error_invalid_syntax, "'[' or '{' expected"); + return NULL; + } + } + + result = parse_value(lex, flags, error); + if (!result) + return NULL; + + if (!(flags & JSON_DISABLE_EOF_CHECK)) { + lex_scan(lex, error); + if (lex->token != TOKEN_EOF) { + error_set(error, lex, json_error_end_of_input_expected, + "end of file expected"); + json_decref(result); + return NULL; + } + } + + if (error) { + /* Save the position even though there was no error */ + error->position = (int)lex->stream.position; + } + + return result; +} + +typedef struct { + const char *data; + size_t pos; +} string_data_t; + +static int string_get(void *data) { + char c; + string_data_t *stream = (string_data_t *)data; + c = 
stream->data[stream->pos]; + if (c == '\0') + return EOF; + else { + stream->pos++; + return (unsigned char)c; + } +} + +json_t *json_loads(const char *string, size_t flags, json_error_t *error) { + lex_t lex; + json_t *result; + string_data_t stream_data; + + jsonp_error_init(error, ""); + + if (string == NULL) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + stream_data.data = string; + stream_data.pos = 0; + + if (lex_init(&lex, string_get, flags, (void *)&stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +typedef struct { + const char *data; + size_t len; + size_t pos; +} buffer_data_t; + +static int buffer_get(void *data) { + char c; + buffer_data_t *stream = data; + if (stream->pos >= stream->len) + return EOF; + + c = stream->data[stream->pos]; + stream->pos++; + return (unsigned char)c; +} + +json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) { + lex_t lex; + json_t *result; + buffer_data_t stream_data; + + jsonp_error_init(error, ""); + + if (buffer == NULL) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + stream_data.data = buffer; + stream_data.pos = 0; + stream_data.len = buflen; + + if (lex_init(&lex, buffer_get, flags, (void *)&stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) { + lex_t lex; + const char *source; + json_t *result; + + if (input == stdin) + source = ""; + else + source = ""; + + jsonp_error_init(error, source); + + if (input == NULL) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + if (lex_init(&lex, (get_func)fgetc, flags, input)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +static int 
fd_get_func(int *fd) { +#ifdef HAVE_UNISTD_H + uint8_t c; + if (read(*fd, &c, 1) == 1) + return c; +#endif + return EOF; +} + +json_t *json_loadfd(int input, size_t flags, json_error_t *error) { + lex_t lex; + const char *source; + json_t *result; + +#ifdef HAVE_UNISTD_H + if (input == STDIN_FILENO) + source = ""; + else +#endif + source = ""; + + jsonp_error_init(error, source); + + if (input < 0) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + if (lex_init(&lex, (get_func)fd_get_func, flags, &input)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} + +json_t *json_load_file(const char *path, size_t flags, json_error_t *error) { + json_t *result; + FILE *fp; + + jsonp_error_init(error, path); + + if (path == NULL) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + fp = fopen(path, "rb"); + if (!fp) { + error_set(error, NULL, json_error_cannot_open_file, "unable to open %s: %s", path, + strerror(errno)); + return NULL; + } + + result = json_loadf(fp, flags, error); + + fclose(fp); + return result; +} + +#define MAX_BUF_LEN 1024 + +typedef struct { + char data[MAX_BUF_LEN]; + size_t len; + size_t pos; + json_load_callback_t callback; + void *arg; +} callback_data_t; + +static int callback_get(void *data) { + char c; + callback_data_t *stream = data; + + if (stream->pos >= stream->len) { + stream->pos = 0; + stream->len = stream->callback(stream->data, MAX_BUF_LEN, stream->arg); + if (stream->len == 0 || stream->len == (size_t)-1) + return EOF; + } + + c = stream->data[stream->pos]; + stream->pos++; + return (unsigned char)c; +} + +json_t *json_load_callback(json_load_callback_t callback, void *arg, size_t flags, + json_error_t *error) { + lex_t lex; + json_t *result; + + callback_data_t stream_data; + + memset(&stream_data, 0, sizeof(stream_data)); + stream_data.callback = callback; + stream_data.arg = arg; + + 
jsonp_error_init(error, ""); + + if (callback == NULL) { + error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); + return NULL; + } + + if (lex_init(&lex, (get_func)callback_get, flags, &stream_data)) + return NULL; + + result = parse_json(&lex, flags, error); + + lex_close(&lex); + return result; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/lookup3.h b/solo-ckpool-source/src/jansson-2.14/src/lookup3.h new file mode 100644 index 0000000..9b39aa1 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/lookup3.h @@ -0,0 +1,382 @@ +// clang-format off +/* +------------------------------------------------------------------------------- +lookup3.c, by Bob Jenkins, May 2006, Public Domain. + +These are functions for producing 32-bit hashes for hash table lookup. +hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() +are externally useful functions. Routines to test the hash are included +if SELF_TEST is defined. You can use this free for any purpose. It's in +the public domain. It has no warranty. + +You probably want to use hashlittle(). hashlittle() and hashbig() +hash byte arrays. hashlittle() is is faster than hashbig() on +little-endian machines. Intel and AMD are little-endian machines. +On second thought, you probably want hashlittle2(), which is identical to +hashlittle() except it returns two 32-bit hashes for the price of one. +You could implement hashbig2() if you wanted but I haven't bothered here. + +If you want to find a hash of, say, exactly 7 integers, do + a = i1; b = i2; c = i3; + mix(a,b,c); + a += i4; b += i5; c += i6; + mix(a,b,c); + a += i7; + final(a,b,c); +then use c as the hash value. If you have a variable length array of +4-byte integers to hash, use hashword(). If you have a byte array (like +a character string), use hashlittle(). If you have several byte arrays, or +a mix of things, see the comments above hashlittle(). + +Why is this so big? 
I read 12 bytes at a time into 3 4-byte integers, +then mix those integers. This is fast (you can do a lot more thorough +mixing with 12*3 instructions on 3 integers than you can with 3 instructions +on 1 byte), but shoehorning those bytes into integers efficiently is messy. +------------------------------------------------------------------------------- +*/ + +#include + +#ifdef HAVE_CONFIG_H +#include +#endif + +#ifdef HAVE_STDINT_H +#include /* defines uint32_t etc */ +#endif + +#ifdef HAVE_SYS_PARAM_H +#include /* attempt to define endianness */ +#endif + +#ifdef HAVE_ENDIAN_H +# include /* attempt to define endianness */ +#endif + +/* + * My best guess at if you are big-endian or little-endian. This may + * need adjustment. + */ +#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ + __BYTE_ORDER == __LITTLE_ENDIAN) || \ + (defined(i386) || defined(__i386__) || defined(__i486__) || \ + defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) +# define HASH_LITTLE_ENDIAN 1 +# define HASH_BIG_ENDIAN 0 +#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ + __BYTE_ORDER == __BIG_ENDIAN) || \ + (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 1 +#else +# define HASH_LITTLE_ENDIAN 0 +# define HASH_BIG_ENDIAN 0 +#endif + +#define hashsize(n) ((size_t)1<<(n)) +#define hashmask(n) (hashsize(n)-1) +#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) + +/* +------------------------------------------------------------------------------- +mix -- mix 3 32-bit values reversibly. + +This is reversible, so any information in (a,b,c) before mix() is +still in (a,b,c) after mix(). + +If four pairs of (a,b,c) inputs are run through mix(), or through +mix() in reverse, there are at least 32 bits of the output that +are sometimes the same for one pair and different for another pair. 
+This was tested for: +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that +satisfy this are + 4 6 8 16 19 4 + 9 15 3 18 27 15 + 14 9 3 7 17 3 +Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing +for "differ" defined as + with a one-bit base and a two-bit delta. I +used http://burtleburtle.net/bob/hash/avalanche.html to choose +the operations, constants, and arrangements of the variables. + +This does not achieve avalanche. There are input bits of (a,b,c) +that fail to affect some output bits of (a,b,c), especially of a. The +most thoroughly mixed value is c, but it doesn't really even achieve +avalanche in c. + +This allows some parallelism. Read-after-writes are good at doubling +the number of bits affected, so the goal of mixing pulls in the opposite +direction as the goal of parallelism. I did what I could. Rotates +seem to cost as much as shifts on every machine I could lay my hands +on, and rotates are much kinder to the top and bottom bits, so I used +rotates. 
+------------------------------------------------------------------------------- +*/ +#define mix(a,b,c) \ +{ \ + a -= c; a ^= rot(c, 4); c += b; \ + b -= a; b ^= rot(a, 6); a += c; \ + c -= b; c ^= rot(b, 8); b += a; \ + a -= c; a ^= rot(c,16); c += b; \ + b -= a; b ^= rot(a,19); a += c; \ + c -= b; c ^= rot(b, 4); b += a; \ +} + +/* +------------------------------------------------------------------------------- +final -- final mixing of 3 32-bit values (a,b,c) into c + +Pairs of (a,b,c) values differing in only a few bits will usually +produce values of c that look totally different. This was tested for +* pairs that differed by one bit, by two bits, in any combination + of top bits of (a,b,c), or in any combination of bottom bits of + (a,b,c). +* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed + the output delta to a Gray code (a^(a>>1)) so a string of 1's (as + is commonly produced by subtraction) look like a single 1-bit + difference. +* the base values were pseudorandom, all zero but one bit set, or + all zero plus a counter that starts at zero. + +These constants passed: + 14 11 25 16 4 14 24 + 12 14 25 16 4 14 24 +and these came close: + 4 8 15 26 3 22 24 + 10 8 15 26 3 22 24 + 11 8 15 26 3 22 24 +------------------------------------------------------------------------------- +*/ +#define final(a,b,c) \ +{ \ + c ^= b; c -= rot(b,14); \ + a ^= c; a -= rot(c,11); \ + b ^= a; b -= rot(a,25); \ + c ^= b; c -= rot(b,16); \ + a ^= c; a -= rot(c,4); \ + b ^= a; b -= rot(a,14); \ + c ^= b; c -= rot(b,24); \ +} + +/* +------------------------------------------------------------------------------- +hashlittle() -- hash a variable-length key into a 32-bit value + k : the key (the unaligned variable-length array of bytes) + length : the length of the key, counting by bytes + initval : can be any 4-byte value +Returns a 32-bit value. Every bit of the key affects every bit of +the return value. 
Two keys differing by one or two bits will have +totally different hash values. + +The best hash table sizes are powers of 2. There is no need to do +mod a prime (mod is sooo slow!). If you need less than 32 bits, +use a bitmask. For example, if you need only 10 bits, do + h = (h & hashmask(10)); +In which case, the hash table should have hashsize(10) elements. + +If you are hashing n strings (uint8_t **)k, do it like this: + for (i=0, h=0; i 12) + { + a += k[0]; + b += k[1]; + c += k[2]; + mix(a,b,c); + length -= 12; + k += 3; + } + + /*----------------------------- handle the last (probably partial) block */ + /* + * "k[2]&0xffffff" actually reads beyond the end of the string, but + * then masks off the part it's not allowed to read. Because the + * string is aligned, the masked-off tail is in the same word as the + * rest of the string. Every machine with memory protection I've seen + * does it on word boundaries, so is OK with this. But VALGRIND will + * still catch it and complain. The masking trick does make the hash + * noticeably faster for short strings (like English words). 
+ */ +#ifndef NO_MASKING_TRICK + + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; + case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; + case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=k[1]&0xffffff; a+=k[0]; break; + case 6 : b+=k[1]&0xffff; a+=k[0]; break; + case 5 : b+=k[1]&0xff; a+=k[0]; break; + case 4 : a+=k[0]; break; + case 3 : a+=k[0]&0xffffff; break; + case 2 : a+=k[0]&0xffff; break; + case 1 : a+=k[0]&0xff; break; + case 0 : return c; /* zero length strings require no mixing */ + } + +#else /* make valgrind happy */ + + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; + case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[1]; a+=k[0]; break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]; break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ + case 1 : a+=k8[0]; break; + case 0 : return c; + } + +#endif /* !valgrind */ + + } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { + const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ + const uint8_t *k8; + + /*--------------- all but last block: aligned reads and different mixing */ + while (length > 12) + { + a += k[0] + (((uint32_t)k[1])<<16); + b += k[2] + (((uint32_t)k[3])<<16); + c += k[4] + (((uint32_t)k[5])<<16); + mix(a,b,c); + length -= 12; + k += 6; + } + + /*----------------------------- handle the last (probably partial) block */ + k8 = (const uint8_t *)k; + switch(length) + { + case 12: c+=k[4]+(((uint32_t)k[5])<<16); + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 11: 
c+=((uint32_t)k8[10])<<16; /* fall through */ + case 10: c+=k[4]; + b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 9 : c+=k8[8]; /* fall through */ + case 8 : b+=k[2]+(((uint32_t)k[3])<<16); + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ + case 6 : b+=k[2]; + a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 5 : b+=k8[4]; /* fall through */ + case 4 : a+=k[0]+(((uint32_t)k[1])<<16); + break; + case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ + case 2 : a+=k[0]; + break; + case 1 : a+=k8[0]; + break; + case 0 : return c; /* zero length requires no mixing */ + } + + } else { /* need to read the key one byte at a time */ + const uint8_t *k = (const uint8_t *)key; + + /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ + while (length > 12) + { + a += k[0]; + a += ((uint32_t)k[1])<<8; + a += ((uint32_t)k[2])<<16; + a += ((uint32_t)k[3])<<24; + b += k[4]; + b += ((uint32_t)k[5])<<8; + b += ((uint32_t)k[6])<<16; + b += ((uint32_t)k[7])<<24; + c += k[8]; + c += ((uint32_t)k[9])<<8; + c += ((uint32_t)k[10])<<16; + c += ((uint32_t)k[11])<<24; + mix(a,b,c); + length -= 12; + k += 12; + } + + /*-------------------------------- last block: affect all 32 bits of (c) */ + switch(length) /* all the case statements fall through */ + { + case 12: c+=((uint32_t)k[11])<<24; /* fall through */ + case 11: c+=((uint32_t)k[10])<<16; /* fall through */ + case 10: c+=((uint32_t)k[9])<<8; /* fall through */ + case 9 : c+=k[8]; /* fall through */ + case 8 : b+=((uint32_t)k[7])<<24; /* fall through */ + case 7 : b+=((uint32_t)k[6])<<16; /* fall through */ + case 6 : b+=((uint32_t)k[5])<<8; /* fall through */ + case 5 : b+=k[4]; /* fall through */ + case 4 : a+=((uint32_t)k[3])<<24; /* fall through */ + case 3 : a+=((uint32_t)k[2])<<16; /* fall through */ + case 2 : a+=((uint32_t)k[1])<<8; /* fall through */ + case 1 : a+=k[0]; + break; + case 0 : return c; + } + } + + 
final(a,b,c); + return c; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/memory.c b/solo-ckpool-source/src/jansson-2.14/src/memory.c new file mode 100644 index 0000000..a912007 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/memory.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2011-2012 Basile Starynkevitch + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify it + * under the terms of the MIT license. See LICENSE for details. + */ + +#include +#include + +#include "jansson.h" +#include "jansson_private.h" + +/* C89 allows these to be macros */ +#undef malloc +#undef free + +/* memory function pointers */ +static json_malloc_t do_malloc = malloc; +static json_free_t do_free = free; + +void *jsonp_malloc(size_t size) { + if (!size) + return NULL; + + return (*do_malloc)(size); +} + +void _jsonp_free(void **ptr) { + if (!*ptr) + return; + + (*do_free)(*ptr); + *ptr = NULL; +} + +char *jsonp_strdup(const char *str) { return jsonp_strndup(str, strlen(str)); } + +char *jsonp_strndup(const char *str, size_t len) { + char *new_str; + + new_str = jsonp_malloc(len + 1); + if (!new_str) + return NULL; + + memcpy(new_str, str, len); + new_str[len] = '\0'; + return new_str; +} + +char *jsonp_strsteal(strbuffer_t *strbuff) +{ + size_t len = strbuff->length + 1; + char *ret = realloc(strbuff->value, len); + + return ret; +} + +char *jsonp_eolstrsteal(strbuffer_t *strbuff) +{ + size_t len = strbuff->length + 2; + char *ret = realloc(strbuff->value, len); + + ret[strbuff->length] = '\n'; + ret[strbuff->length + 1] = '\0'; + return ret; +} + +void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn) { + do_malloc = malloc_fn; + do_free = free_fn; +} + +void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn) { + if (malloc_fn) + *malloc_fn = do_malloc; + if (free_fn) + *free_fn = do_free; +} diff --git 
a/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c b/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c new file mode 100644 index 0000000..04c116e --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c @@ -0,0 +1,937 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2011-2012 Graeme Smecher + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "jansson.h" +#include "jansson_private.h" +#include "utf.h" +#include + +typedef struct { + int line; + int column; + size_t pos; + char token; +} token_t; + +typedef struct { + const char *start; + const char *fmt; + token_t prev_token; + token_t token; + token_t next_token; + json_error_t *error; + size_t flags; + int line; + int column; + size_t pos; + int has_error; +} scanner_t; + +#define token(scanner) ((scanner)->token.token) + +static const char *const type_names[] = {"object", "array", "string", "integer", + "real", "true", "false", "null"}; + +#define type_name(x) type_names[json_typeof(x)] + +static const char unpack_value_starters[] = "{[siIbfFOon"; + +static void scanner_init(scanner_t *s, json_error_t *error, size_t flags, + const char *fmt) { + s->error = error; + s->flags = flags; + s->fmt = s->start = fmt; + memset(&s->prev_token, 0, sizeof(token_t)); + memset(&s->token, 0, sizeof(token_t)); + memset(&s->next_token, 0, sizeof(token_t)); + s->line = 1; + s->column = 0; + s->pos = 0; + s->has_error = 0; +} + +static void next_token(scanner_t *s) { + const char *t; + s->prev_token = s->token; + + if (s->next_token.line) { + s->token = s->next_token; + s->next_token.line = 0; + return; + } + + if (!token(s) && !*s->fmt) + return; + + t = s->fmt; + s->column++; + s->pos++; + + /* skip space and ignored chars */ + while (*t == ' ' || *t == '\t' || *t == '\n' || *t == ',' || *t == ':') { + if (*t == '\n') { + s->line++; + s->column = 1; + } else + s->column++; + + 
s->pos++; + t++; + } + + s->token.token = *t; + s->token.line = s->line; + s->token.column = s->column; + s->token.pos = s->pos; + + if (*t) + t++; + s->fmt = t; +} + +static void prev_token(scanner_t *s) { + s->next_token = s->token; + s->token = s->prev_token; +} + +static void set_error(scanner_t *s, const char *source, enum json_error_code code, + const char *fmt, ...) { + va_list ap; + va_start(ap, fmt); + + jsonp_error_vset(s->error, s->token.line, s->token.column, s->token.pos, code, fmt, + ap); + + jsonp_error_set_source(s->error, source); + + va_end(ap); +} + +static json_t *pack(scanner_t *s, va_list *ap); + +/* ours will be set to 1 if jsonp_free() must be called for the result + afterwards */ +static char *read_string(scanner_t *s, va_list *ap, const char *purpose, size_t *out_len, + int *ours, int optional) { + char t; + strbuffer_t strbuff; + const char *str; + size_t length; + + next_token(s); + t = token(s); + prev_token(s); + + *ours = 0; + if (t != '#' && t != '%' && t != '+') { + /* Optimize the simple case */ + str = va_arg(*ap, const char *); + + if (!str) { + if (!optional) { + set_error(s, "", json_error_null_value, "NULL %s", purpose); + s->has_error = 1; + } + return NULL; + } + + length = strlen(str); + + if (!utf8_check_string(str, length)) { + set_error(s, "", json_error_invalid_utf8, "Invalid UTF-8 %s", purpose); + s->has_error = 1; + return NULL; + } + + *out_len = length; + return (char *)str; + } else if (optional) { + set_error(s, "", json_error_invalid_format, + "Cannot use '%c' on optional strings", t); + s->has_error = 1; + + return NULL; + } + + if (strbuffer_init(&strbuff)) { + set_error(s, "", json_error_out_of_memory, "Out of memory"); + s->has_error = 1; + } + + while (1) { + str = va_arg(*ap, const char *); + if (!str) { + set_error(s, "", json_error_null_value, "NULL %s", purpose); + s->has_error = 1; + } + + next_token(s); + + if (token(s) == '#') { + length = va_arg(*ap, int); + } else if (token(s) == '%') { + length = 
va_arg(*ap, size_t); + } else { + prev_token(s); + length = s->has_error ? 0 : strlen(str); + } + + if (!s->has_error && strbuffer_append_bytes(&strbuff, str, length) == -1) { + set_error(s, "", json_error_out_of_memory, "Out of memory"); + s->has_error = 1; + } + + next_token(s); + if (token(s) != '+') { + prev_token(s); + break; + } + } + + if (s->has_error) { + strbuffer_close(&strbuff); + return NULL; + } + + if (!utf8_check_string(strbuff.value, strbuff.length)) { + set_error(s, "", json_error_invalid_utf8, "Invalid UTF-8 %s", purpose); + strbuffer_close(&strbuff); + s->has_error = 1; + return NULL; + } + + *out_len = strbuff.length; + *ours = 1; + return strbuffer_steal_value(&strbuff); +} + +static json_t *pack_object(scanner_t *s, va_list *ap) { + json_t *object = json_object(); + next_token(s); + + while (token(s) != '}') { + char *key; + size_t len; + int ours; + json_t *value; + char valueOptional; + + if (!token(s)) { + set_error(s, "", json_error_invalid_format, + "Unexpected end of format string"); + goto error; + } + + if (token(s) != 's') { + set_error(s, "", json_error_invalid_format, + "Expected format 's', got '%c'", token(s)); + goto error; + } + + key = read_string(s, ap, "object key", &len, &ours, 0); + + next_token(s); + + next_token(s); + valueOptional = token(s); + prev_token(s); + + value = pack(s, ap); + if (!value) { + if (ours) + jsonp_free(key); + + if (valueOptional != '*') { + set_error(s, "", json_error_null_value, "NULL object value"); + s->has_error = 1; + } + + next_token(s); + continue; + } + + if (s->has_error) + json_decref(value); + + if (!s->has_error && json_object_set_new_nocheck(object, key, value)) { + set_error(s, "", json_error_out_of_memory, + "Unable to add key \"%s\"", key); + s->has_error = 1; + } + + if (ours) + jsonp_free(key); + + next_token(s); + } + + if (!s->has_error) + return object; + +error: + json_decref(object); + return NULL; +} + +static json_t *pack_array(scanner_t *s, va_list *ap) { + json_t *array 
= json_array(); + next_token(s); + + while (token(s) != ']') { + json_t *value; + char valueOptional; + + if (!token(s)) { + set_error(s, "", json_error_invalid_format, + "Unexpected end of format string"); + /* Format string errors are unrecoverable. */ + goto error; + } + + next_token(s); + valueOptional = token(s); + prev_token(s); + + value = pack(s, ap); + if (!value) { + if (valueOptional != '*') { + s->has_error = 1; + } + + next_token(s); + continue; + } + + if (s->has_error) + json_decref(value); + + if (!s->has_error && json_array_append_new(array, value)) { + set_error(s, "", json_error_out_of_memory, + "Unable to append to array"); + s->has_error = 1; + } + + next_token(s); + } + + if (!s->has_error) + return array; + +error: + json_decref(array); + return NULL; +} + +static json_t *pack_string(scanner_t *s, va_list *ap) { + char *str; + char t; + size_t len; + int ours; + int optional; + + next_token(s); + t = token(s); + optional = t == '?' || t == '*'; + if (!optional) + prev_token(s); + + str = read_string(s, ap, "string", &len, &ours, optional); + + if (!str) + return t == '?' && !s->has_error ? json_null() : NULL; + + if (s->has_error) { + /* It's impossible to reach this point if ours != 0, do not free str. */ + return NULL; + } + + if (ours) + return jsonp_stringn_nocheck_own(str, len); + + return json_stringn_nocheck(str, len); +} + +static json_t *pack_object_inter(scanner_t *s, va_list *ap, int need_incref) { + json_t *json; + char ntoken; + + next_token(s); + ntoken = token(s); + + if (ntoken != '?' && ntoken != '*') + prev_token(s); + + json = va_arg(*ap, json_t *); + + if (json) + return need_incref ? 
json_incref(json) : json; + + switch (ntoken) { + case '?': + return json_null(); + case '*': + return NULL; + default: + break; + } + + set_error(s, "", json_error_null_value, "NULL object"); + s->has_error = 1; + return NULL; +} + +static json_t *pack_integer(scanner_t *s, json_int_t value) { + json_t *json = json_integer(value); + + if (!json) { + set_error(s, "", json_error_out_of_memory, "Out of memory"); + s->has_error = 1; + } + + return json; +} + +static json_t *pack_real(scanner_t *s, double value) { + /* Allocate without setting value so we can identify OOM error. */ + json_t *json = json_real(0.0); + + if (!json) { + set_error(s, "", json_error_out_of_memory, "Out of memory"); + s->has_error = 1; + + return NULL; + } + + if (json_real_set(json, value)) { + json_decref(json); + + set_error(s, "", json_error_numeric_overflow, + "Invalid floating point value"); + s->has_error = 1; + + return NULL; + } + + return json; +} + +static json_t *pack(scanner_t *s, va_list *ap) { + switch (token(s)) { + case '{': + return pack_object(s, ap); + + case '[': + return pack_array(s, ap); + + case 's': /* string */ + return pack_string(s, ap); + + case 'n': /* null */ + return json_null(); + + case 'b': /* boolean */ + return va_arg(*ap, int) ? 
json_true() : json_false(); + + case 'i': /* integer from int */ + return pack_integer(s, va_arg(*ap, int)); + + case 'I': /* integer from json_int_t */ + return pack_integer(s, va_arg(*ap, json_int_t)); + + case 'f': /* real */ + return pack_real(s, va_arg(*ap, double)); + + case 'O': /* a json_t object; increments refcount */ + return pack_object_inter(s, ap, 1); + + case 'o': /* a json_t object; doesn't increment refcount */ + return pack_object_inter(s, ap, 0); + + default: + set_error(s, "", json_error_invalid_format, + "Unexpected format character '%c'", token(s)); + s->has_error = 1; + return NULL; + } +} + +static int unpack(scanner_t *s, json_t *root, va_list *ap); + +static int unpack_object(scanner_t *s, json_t *root, va_list *ap) { + int ret = -1; + int strict = 0; + int gotopt = 0; + + /* Use a set (emulated by a hashtable) to check that all object + keys are accessed. Checking that the correct number of keys + were accessed is not enough, as the same key can be unpacked + multiple times. + */ + hashtable_t key_set; + + if (hashtable_init(&key_set)) { + set_error(s, "", json_error_out_of_memory, "Out of memory"); + return -1; + } + + if (root && !json_is_object(root)) { + set_error(s, "", json_error_wrong_type, "Expected object, got %s", + type_name(root)); + goto out; + } + next_token(s); + + while (token(s) != '}') { + const char *key; + json_t *value; + int opt = 0; + + if (strict != 0) { + set_error(s, "", json_error_invalid_format, + "Expected '}' after '%c', got '%c'", (strict == 1 ? '!' : '*'), + token(s)); + goto out; + } + + if (!token(s)) { + set_error(s, "", json_error_invalid_format, + "Unexpected end of format string"); + goto out; + } + + if (token(s) == '!' || token(s) == '*') { + strict = (token(s) == '!' ? 
1 : -1); + next_token(s); + continue; + } + + if (token(s) != 's') { + set_error(s, "", json_error_invalid_format, + "Expected format 's', got '%c'", token(s)); + goto out; + } + + key = va_arg(*ap, const char *); + if (!key) { + set_error(s, "", json_error_null_value, "NULL object key"); + goto out; + } + + next_token(s); + + if (token(s) == '?') { + opt = gotopt = 1; + next_token(s); + } + + if (!root) { + /* skipping */ + value = NULL; + } else { + value = json_object_get(root, key); + if (!value && !opt) { + set_error(s, "", json_error_item_not_found, + "Object item not found: %s", key); + goto out; + } + } + + if (unpack(s, value, ap)) + goto out; + + hashtable_set(&key_set, key, strlen(key), json_null()); + next_token(s); + } + + if (strict == 0 && (s->flags & JSON_STRICT)) + strict = 1; + + if (root && strict == 1) { + /* We need to check that all non optional items have been parsed */ + const char *key; + size_t key_len; + /* keys_res is 1 for uninitialized, 0 for success, -1 for error. */ + int keys_res = 1; + strbuffer_t unrecognized_keys; + json_t *value; + long unpacked = 0; + + if (gotopt || json_object_size(root) != key_set.size) { + json_object_foreach(root, key, value) { + key_len = strlen(key); + if (!hashtable_get(&key_set, key, key_len)) { + unpacked++; + + /* Save unrecognized keys for the error message */ + if (keys_res == 1) { + keys_res = strbuffer_init(&unrecognized_keys); + } else if (!keys_res) { + keys_res = strbuffer_append_bytes(&unrecognized_keys, ", ", 2); + } + + if (!keys_res) + keys_res = + strbuffer_append_bytes(&unrecognized_keys, key, key_len); + } + } + } + if (unpacked) { + set_error(s, "", json_error_end_of_input_expected, + "%li object item(s) left unpacked: %s", unpacked, + keys_res ? 
"" : strbuffer_value(&unrecognized_keys)); + strbuffer_close(&unrecognized_keys); + goto out; + } + } + + ret = 0; + +out: + hashtable_close(&key_set); + return ret; +} + +static int unpack_array(scanner_t *s, json_t *root, va_list *ap) { + size_t i = 0; + int strict = 0; + + if (root && !json_is_array(root)) { + set_error(s, "", json_error_wrong_type, "Expected array, got %s", + type_name(root)); + return -1; + } + next_token(s); + + while (token(s) != ']') { + json_t *value; + + if (strict != 0) { + set_error(s, "", json_error_invalid_format, + "Expected ']' after '%c', got '%c'", (strict == 1 ? '!' : '*'), + token(s)); + return -1; + } + + if (!token(s)) { + set_error(s, "", json_error_invalid_format, + "Unexpected end of format string"); + return -1; + } + + if (token(s) == '!' || token(s) == '*') { + strict = (token(s) == '!' ? 1 : -1); + next_token(s); + continue; + } + + if (!strchr(unpack_value_starters, token(s))) { + set_error(s, "", json_error_invalid_format, + "Unexpected format character '%c'", token(s)); + return -1; + } + + if (!root) { + /* skipping */ + value = NULL; + } else { + value = json_array_get(root, i); + if (!value) { + set_error(s, "", json_error_index_out_of_range, + "Array index %lu out of range", (unsigned long)i); + return -1; + } + } + + if (unpack(s, value, ap)) + return -1; + + next_token(s); + i++; + } + + if (strict == 0 && (s->flags & JSON_STRICT)) + strict = 1; + + if (root && strict == 1 && i != json_array_size(root)) { + long diff = (long)json_array_size(root) - (long)i; + set_error(s, "", json_error_end_of_input_expected, + "%li array item(s) left unpacked", diff); + return -1; + } + + return 0; +} + +static int unpack(scanner_t *s, json_t *root, va_list *ap) { + switch (token(s)) { + case '{': + return unpack_object(s, root, ap); + + case '[': + return unpack_array(s, root, ap); + + case 's': + if (root && !json_is_string(root)) { + set_error(s, "", json_error_wrong_type, + "Expected string, got %s", type_name(root)); + 
return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + const char **str_target; + size_t *len_target = NULL; + + str_target = va_arg(*ap, const char **); + if (!str_target) { + set_error(s, "", json_error_null_value, "NULL string argument"); + return -1; + } + + next_token(s); + + if (token(s) == '%') { + len_target = va_arg(*ap, size_t *); + if (!len_target) { + set_error(s, "", json_error_null_value, + "NULL string length argument"); + return -1; + } + } else + prev_token(s); + + if (root) { + *str_target = json_string_value(root); + if (len_target) + *len_target = json_string_length(root); + } + } + return 0; + + case 'i': + if (root && !json_is_integer(root)) { + set_error(s, "", json_error_wrong_type, + "Expected integer, got %s", type_name(root)); + return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + int *target = va_arg(*ap, int *); + if (root) + *target = (int)json_integer_value(root); + } + + return 0; + + case 'I': + if (root && !json_is_integer(root)) { + set_error(s, "", json_error_wrong_type, + "Expected integer, got %s", type_name(root)); + return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + json_int_t *target = va_arg(*ap, json_int_t *); + if (root) + *target = json_integer_value(root); + } + + return 0; + + case 'b': + if (root && !json_is_boolean(root)) { + set_error(s, "", json_error_wrong_type, + "Expected true or false, got %s", type_name(root)); + return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + int *target = va_arg(*ap, int *); + if (root) + *target = json_is_true(root); + } + + return 0; + + case 'f': + if (root && !json_is_real(root)) { + set_error(s, "", json_error_wrong_type, + "Expected real, got %s", type_name(root)); + return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + double *target = va_arg(*ap, double *); + if (root) + *target = json_real_value(root); + } + + return 0; + + case 'F': + if (root && !json_is_number(root)) { + set_error(s, "", json_error_wrong_type, + "Expected real or 
integer, got %s", type_name(root)); + return -1; + } + + if (!(s->flags & JSON_VALIDATE_ONLY)) { + double *target = va_arg(*ap, double *); + if (root) + *target = json_number_value(root); + } + + return 0; + + case 'O': + if (root && !(s->flags & JSON_VALIDATE_ONLY)) + json_incref(root); + /* Fall through */ + + case 'o': + if (!(s->flags & JSON_VALIDATE_ONLY)) { + json_t **target = va_arg(*ap, json_t **); + if (root) + *target = root; + } + + return 0; + + case 'n': + /* Never assign, just validate */ + if (root && !json_is_null(root)) { + set_error(s, "", json_error_wrong_type, + "Expected null, got %s", type_name(root)); + return -1; + } + return 0; + + default: + set_error(s, "", json_error_invalid_format, + "Unexpected format character '%c'", token(s)); + return -1; + } +} + +json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) { + scanner_t s; + va_list ap_copy; + json_t *value; + + if (!fmt || !*fmt) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, json_error_invalid_argument, + "NULL or empty format string"); + return NULL; + } + jsonp_error_init(error, NULL); + + scanner_init(&s, error, flags, fmt); + next_token(&s); + + va_copy(ap_copy, ap); + value = pack(&s, &ap_copy); + va_end(ap_copy); + + /* This will cover all situations where s.has_error is true */ + if (!value) + return NULL; + + next_token(&s); + if (token(&s)) { + json_decref(value); + set_error(&s, "", json_error_invalid_format, + "Garbage after format string"); + return NULL; + } + + return value; +} + +json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) { + json_t *value; + va_list ap; + + va_start(ap, fmt); + value = json_vpack_ex(error, flags, fmt, ap); + va_end(ap); + + return value; +} + +json_t *json_pack(const char *fmt, ...) 
{ + json_t *value; + va_list ap; + + va_start(ap, fmt); + value = json_vpack_ex(NULL, 0, fmt, ap); + va_end(ap); + + return value; +} + +int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, + va_list ap) { + scanner_t s; + va_list ap_copy; + + if (!root) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, json_error_null_value, "NULL root value"); + return -1; + } + + if (!fmt || !*fmt) { + jsonp_error_init(error, ""); + jsonp_error_set(error, -1, -1, 0, json_error_invalid_argument, + "NULL or empty format string"); + return -1; + } + jsonp_error_init(error, NULL); + + scanner_init(&s, error, flags, fmt); + next_token(&s); + + va_copy(ap_copy, ap); + if (unpack(&s, root, &ap_copy)) { + va_end(ap_copy); + return -1; + } + va_end(ap_copy); + + next_token(&s); + if (token(&s)) { + set_error(&s, "", json_error_invalid_format, + "Garbage after format string"); + return -1; + } + + return 0; +} + +int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, + ...) { + int ret; + va_list ap; + + va_start(ap, fmt); + ret = json_vunpack_ex(root, error, flags, fmt, ap); + va_end(ap); + + return ret; +} + +int json_unpack(json_t *root, const char *fmt, ...) { + int ret; + va_list ap; + + va_start(ap, fmt); + ret = json_vunpack_ex(root, NULL, 0, fmt, ap); + va_end(ap); + + return ret; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c new file mode 100644 index 0000000..733da31 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "strbuffer.h" +#include "jansson_private.h" +#include +#include +#include + +#define STRBUFFER_MIN_SIZE 4096 +#define STRBUFFER_FACTOR 2 +#define STRBUFFER_SIZE_MAX ((size_t)-1) + +int strbuffer_init(strbuffer_t *strbuff) { + strbuff->size = STRBUFFER_MIN_SIZE; + strbuff->length = 0; + + strbuff->value = jsonp_malloc(strbuff->size); + if (!strbuff->value) + return -1; + + /* initialize to empty */ + strbuff->value[0] = '\0'; + return 0; +} + +void strbuffer_close(strbuffer_t *strbuff) { + if (strbuff->value) + jsonp_free(strbuff->value); + + strbuff->size = 0; + strbuff->length = 0; + strbuff->value = NULL; +} + +void strbuffer_clear(strbuffer_t *strbuff) { + strbuff->length = 0; + strbuff->value[0] = '\0'; +} + +const char *strbuffer_value(const strbuffer_t *strbuff) { return strbuff->value; } + +char *strbuffer_steal_value(strbuffer_t *strbuff) { + char *result = strbuff->value; + strbuff->value = NULL; + return result; +} + +int strbuffer_append_byte(strbuffer_t *strbuff, char byte) { + return strbuffer_append_bytes(strbuff, &byte, 1); +} + +int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size) { + /* Leave room for EOL and NULL bytes */ + if(size + 2 > strbuff->size - strbuff->length) { + int backoff = 1; + size_t new_size; + char *new_value; + + /* avoid integer overflow */ + if (strbuff->size > STRBUFFER_SIZE_MAX / STRBUFFER_FACTOR || + size > STRBUFFER_SIZE_MAX - 1 || + strbuff->length > STRBUFFER_SIZE_MAX - 1 - size) + return -1; + + new_size = max(strbuff->size * STRBUFFER_FACTOR, strbuff->length + size + 1); + + while (42) { + new_value = realloc(strbuff->value, new_size); + if (new_value) + break; + usleep(backoff * 1000); + backoff <<= 1; + } + + strbuff->value = new_value; + strbuff->size = new_size; + } + + memcpy(strbuff->value + strbuff->length, data, size); + strbuff->length += size; + strbuff->value[strbuff->length] = '\0'; + + return 0; +} + +char 
strbuffer_pop(strbuffer_t *strbuff) { + if (strbuff->length > 0) { + char c = strbuff->value[--strbuff->length]; + strbuff->value[strbuff->length] = '\0'; + return c; + } else + return '\0'; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h new file mode 100644 index 0000000..70f2646 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef STRBUFFER_H +#define STRBUFFER_H + +#include "jansson.h" +#include + +typedef struct { + char *value; + size_t length; /* bytes used */ + size_t size; /* bytes allocated */ +} strbuffer_t; + +int strbuffer_init(strbuffer_t *strbuff) JANSSON_ATTRS((warn_unused_result)); +void strbuffer_close(strbuffer_t *strbuff); + +void strbuffer_clear(strbuffer_t *strbuff); + +const char *strbuffer_value(const strbuffer_t *strbuff); + +/* Steal the value and close the strbuffer */ +char *strbuffer_steal_value(strbuffer_t *strbuff); + +int strbuffer_append_byte(strbuffer_t *strbuff, char byte); +int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size); + +char strbuffer_pop(strbuffer_t *strbuff); + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/strconv.c b/solo-ckpool-source/src/jansson-2.14/src/strconv.c new file mode 100644 index 0000000..c6f4fd1 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/strconv.c @@ -0,0 +1,132 @@ +#include "jansson_private.h" +#include "strbuffer.h" +#include +#include +#include +#include +#include + +/* need jansson_private_config.h to get the correct snprintf */ +#ifdef HAVE_CONFIG_H +#include +#endif + +#if JSON_HAVE_LOCALECONV +#include + +/* + - This code assumes that the decimal separator is exactly one + character. 
+ + - If setlocale() is called by another thread between the call to + localeconv() and the call to sprintf() or strtod(), the result may + be wrong. setlocale() is not thread-safe and should not be used + this way. Multi-threaded programs should use uselocale() instead. +*/ + +static void to_locale(strbuffer_t *strbuffer) { + const char *point; + char *pos; + + point = localeconv()->decimal_point; + if (*point == '.') { + /* No conversion needed */ + return; + } + + pos = strchr(strbuffer->value, '.'); + if (pos) + *pos = *point; +} + +static void from_locale(char *buffer) { + const char *point; + char *pos; + + point = localeconv()->decimal_point; + if (*point == '.') { + /* No conversion needed */ + return; + } + + pos = strchr(buffer, *point); + if (pos) + *pos = '.'; +} +#endif + +int jsonp_strtod(strbuffer_t *strbuffer, double *out) { + double value; + char *end; + +#if JSON_HAVE_LOCALECONV + to_locale(strbuffer); +#endif + + errno = 0; + value = strtod(strbuffer->value, &end); + assert(end == strbuffer->value + strbuffer->length); + + if ((value == HUGE_VAL || value == -HUGE_VAL) && errno == ERANGE) { + /* Overflow */ + return -1; + } + + *out = value; + return 0; +} + +int jsonp_dtostr(char *buffer, size_t size, double value, int precision) { + int ret; + char *start, *end; + size_t length; + + if (precision == 0) + precision = 17; + + ret = snprintf(buffer, size, "%.*g", precision, value); + if (ret < 0) + return -1; + + length = (size_t)ret; + if (length >= size) + return -1; + +#if JSON_HAVE_LOCALECONV + from_locale(buffer); +#endif + + /* Make sure there's a dot or 'e' in the output. Otherwise + a real is converted to an integer when decoding */ + if (strchr(buffer, '.') == NULL && strchr(buffer, 'e') == NULL) { + if (length + 3 >= size) { + /* No space to append ".0" */ + return -1; + } + buffer[length] = '.'; + buffer[length + 1] = '0'; + buffer[length + 2] = '\0'; + length += 2; + } + + /* Remove leading '+' from positive exponent. 
Also remove leading + zeros from exponents (added by some printf() implementations) */ + start = strchr(buffer, 'e'); + if (start) { + start++; + end = start + 1; + + if (*start == '-') + start++; + + while (*end == '0') + end++; + + if (end != start) { + memmove(start, end, length - (size_t)(end - buffer)); + length -= (size_t)(end - start); + } + } + + return (int)length; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/utf.c b/solo-ckpool-source/src/jansson-2.14/src/utf.c new file mode 100644 index 0000000..135a3f3 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/utf.c @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#include "utf.h" +#include + +int utf8_encode(int32_t codepoint, char *buffer, size_t *size) { + if (codepoint < 0) + return -1; + else if (codepoint < 0x80) { + buffer[0] = (char)codepoint; + *size = 1; + } else if (codepoint < 0x800) { + buffer[0] = 0xC0 + ((codepoint & 0x7C0) >> 6); + buffer[1] = 0x80 + ((codepoint & 0x03F)); + *size = 2; + } else if (codepoint < 0x10000) { + buffer[0] = 0xE0 + ((codepoint & 0xF000) >> 12); + buffer[1] = 0x80 + ((codepoint & 0x0FC0) >> 6); + buffer[2] = 0x80 + ((codepoint & 0x003F)); + *size = 3; + } else if (codepoint <= 0x10FFFF) { + buffer[0] = 0xF0 + ((codepoint & 0x1C0000) >> 18); + buffer[1] = 0x80 + ((codepoint & 0x03F000) >> 12); + buffer[2] = 0x80 + ((codepoint & 0x000FC0) >> 6); + buffer[3] = 0x80 + ((codepoint & 0x00003F)); + *size = 4; + } else + return -1; + + return 0; +} + +size_t utf8_check_first(char byte) { + unsigned char u = (unsigned char)byte; + + if (u < 0x80) + return 1; + + if (0x80 <= u && u <= 0xBF) { + /* second, third or fourth byte of a multi-byte + sequence, i.e. 
a "continuation byte" */ + return 0; + } else if (u == 0xC0 || u == 0xC1) { + /* overlong encoding of an ASCII byte */ + return 0; + } else if (0xC2 <= u && u <= 0xDF) { + /* 2-byte sequence */ + return 2; + } + + else if (0xE0 <= u && u <= 0xEF) { + /* 3-byte sequence */ + return 3; + } else if (0xF0 <= u && u <= 0xF4) { + /* 4-byte sequence */ + return 4; + } else { /* u >= 0xF5 */ + /* Restricted (start of 4-, 5- or 6-byte sequence) or invalid + UTF-8 */ + return 0; + } +} + +size_t utf8_check_full(const char *buffer, size_t size, int32_t *codepoint) { + size_t i; + int32_t value = 0; + unsigned char u = (unsigned char)buffer[0]; + + if (size == 2) { + value = u & 0x1F; + } else if (size == 3) { + value = u & 0xF; + } else if (size == 4) { + value = u & 0x7; + } else + return 0; + + for (i = 1; i < size; i++) { + u = (unsigned char)buffer[i]; + + if (u < 0x80 || u > 0xBF) { + /* not a continuation byte */ + return 0; + } + + value = (value << 6) + (u & 0x3F); + } + + if (value > 0x10FFFF) { + /* not in Unicode range */ + return 0; + } + + else if (0xD800 <= value && value <= 0xDFFF) { + /* invalid code point (UTF-16 surrogate halves) */ + return 0; + } + + else if ((size == 2 && value < 0x80) || (size == 3 && value < 0x800) || + (size == 4 && value < 0x10000)) { + /* overlong encoding */ + return 0; + } + + if (codepoint) + *codepoint = value; + + return 1; +} + +const char *utf8_iterate(const char *buffer, size_t bufsize, int32_t *codepoint, int noutf8) +{ + size_t count = 1; + int32_t value; + + if (!bufsize) + return buffer; + + if (!noutf8) { + count = utf8_check_first(buffer[0]); + if(count <= 0) + return NULL; + } + + if (count == 1) + value = (unsigned char)buffer[0]; + else { + if (count > bufsize || !utf8_check_full(buffer, count, &value)) + return NULL; + } + + if (codepoint) + *codepoint = value; + + return buffer + count; +} + +int utf8_check_string(const char *string, size_t length) { + size_t i; + + for (i = 0; i < length; i++) { + size_t count = 
utf8_check_first(string[i]); + if (count == 0) + return 0; + else if (count > 1) { + if (count > length - i) + return 0; + + if (!utf8_check_full(&string[i], count, NULL)) + return 0; + + i += count - 1; + } + } + + return 1; +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/utf.h b/solo-ckpool-source/src/jansson-2.14/src/utf.h new file mode 100644 index 0000000..a5c46cb --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/utf.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * Copyright (c) 2015,2017,2023 Con Kolivas + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. + */ + +#ifndef UTF_H +#define UTF_H + +#ifdef HAVE_CONFIG_H +#include +#endif + +#include +#ifdef HAVE_STDINT_H +#include +#endif + +int utf8_encode(int32_t codepoint, char *buffer, size_t *size); + +size_t utf8_check_first(char byte); +size_t utf8_check_full(const char *buffer, size_t size, int32_t *codepoint); +const char *utf8_iterate(const char *buffer, size_t size, int32_t *codepoint, int noutf8); + +int utf8_check_string(const char *string, size_t length); + +#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/value.c b/solo-ckpool-source/src/jansson-2.14/src/value.c new file mode 100644 index 0000000..07af087 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/value.c @@ -0,0 +1,1112 @@ +/* + * Copyright (c) 2009-2016 Petri Lehtinen + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifdef HAVE_CONFIG_H +#include +#endif + +#include +#include +#include +#include + +#ifdef HAVE_STDINT_H +#include +#endif + +#include "hashtable.h" +#include "jansson.h" +#include "jansson_private.h" +#include "utf.h" + +/* Work around nonstandard isnan() and isinf() implementations */ +#ifndef isnan +#ifndef __sun +static JSON_INLINE int isnan(double x) { return x != x; } +#endif +#endif +#ifndef isinf +static JSON_INLINE int isinf(double x) { return !isnan(x) && isnan(x - x); } +#endif + +json_t *do_deep_copy(const json_t *json, hashtable_t *parents); + +static JSON_INLINE void json_init(json_t *json, json_type type) { + json->type = type; + json->refcount = 1; +} + +int jsonp_loop_check(hashtable_t *parents, const json_t *json, char *key, size_t key_size, + size_t *key_len_out) { + size_t key_len = snprintf(key, key_size, "%p", json); + + if (key_len_out) + *key_len_out = key_len; + + if (hashtable_get(parents, key, key_len)) + return -1; + + return hashtable_set(parents, key, key_len, json_null()); +} + +/*** object ***/ + +extern volatile uint32_t hashtable_seed; + +json_t *json_object(void) { + json_object_t *object = jsonp_malloc(sizeof(json_object_t)); + if (!object) + return NULL; + + if (!hashtable_seed) { + /* Autoseed */ + json_object_seed(0); + } + + json_init(&object->json, JSON_OBJECT); + + if (hashtable_init(&object->hashtable)) { + jsonp_free(object); + return NULL; + } + + return &object->json; +} + +static void json_delete_object(json_object_t *object) { + hashtable_close(&object->hashtable); + jsonp_free(object); +} + +size_t json_object_size(const json_t *json) { + json_object_t *object; + + if (!json_is_object(json)) + return 0; + + object = json_to_object(json); + return object->hashtable.size; +} + +json_t *json_object_get(const json_t *json, const char *key) { + if (!key) + return NULL; + + return json_object_getn(json, key, strlen(key)); +} + +json_t *json_object_getn(const 
json_t *json, const char *key, size_t key_len) { + json_object_t *object; + + if (!key || !json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_get(&object->hashtable, key, key_len); +} + +int json_object_set_new_nocheck(json_t *json, const char *key, json_t *value) { + if (!key) { + json_decref(value); + return -1; + } + return json_object_setn_new_nocheck(json, key, strlen(key), value); +} + +int json_object_setn_new_nocheck(json_t *json, const char *key, size_t key_len, + json_t *value) { + json_object_t *object; + + if (!value) + return -1; + + if (!key || !json_is_object(json) || json == value) { + json_decref(value); + return -1; + } + object = json_to_object(json); + + if (hashtable_set(&object->hashtable, key, key_len, value)) { + json_decref(value); + return -1; + } + + return 0; +} + +int json_object_set_new(json_t *json, const char *key, json_t *value) { + if (!key) { + json_decref(value); + return -1; + } + + return json_object_setn_new(json, key, strlen(key), value); +} + +int json_object_setn_new(json_t *json, const char *key, size_t key_len, json_t *value) { + if (!key || !utf8_check_string(key, key_len)) { + json_decref(value); + return -1; + } + + return json_object_setn_new_nocheck(json, key, key_len, value); +} + +int json_object_del(json_t *json, const char *key) { + if (!key) + return -1; + + return json_object_deln(json, key, strlen(key)); +} + +int json_object_deln(json_t *json, const char *key, size_t key_len) { + json_object_t *object; + + if (!key || !json_is_object(json)) + return -1; + + object = json_to_object(json); + return hashtable_del(&object->hashtable, key, key_len); +} + +int json_object_clear(json_t *json) { + json_object_t *object; + + if (!json_is_object(json)) + return -1; + + object = json_to_object(json); + hashtable_clear(&object->hashtable); + + return 0; +} + +int json_object_update(json_t *object, json_t *other) { + const char *key; + json_t *value; + + if (!json_is_object(object) 
|| !json_is_object(other)) + return -1; + + json_object_foreach(other, key, value) { + if (json_object_set_nocheck(object, key, value)) + return -1; + } + + return 0; +} + +int json_object_update_existing(json_t *object, json_t *other) { + const char *key; + size_t key_len; + json_t *value; + + if (!json_is_object(object) || !json_is_object(other)) + return -1; + + json_object_keylen_foreach(other, key, key_len, value) { + if (json_object_getn(object, key, key_len)) + json_object_setn_nocheck(object, key, key_len, value); + } + + return 0; +} + +int json_object_update_missing(json_t *object, json_t *other) { + const char *key; + json_t *value; + + if (!json_is_object(object) || !json_is_object(other)) + return -1; + + json_object_foreach(other, key, value) { + if (!json_object_get(object, key)) + json_object_set_nocheck(object, key, value); + } + + return 0; +} + +int do_object_update_recursive(json_t *object, json_t *other, hashtable_t *parents) { + const char *key; + size_t key_len; + json_t *value; + char loop_key[LOOP_KEY_LEN]; + int res = 0; + size_t loop_key_len; + + if (!json_is_object(object) || !json_is_object(other)) + return -1; + + if (jsonp_loop_check(parents, other, loop_key, sizeof(loop_key), &loop_key_len)) + return -1; + + json_object_keylen_foreach(other, key, key_len, value) { + json_t *v = json_object_get(object, key); + + if (json_is_object(v) && json_is_object(value)) { + if (do_object_update_recursive(v, value, parents)) { + res = -1; + break; + } + } else { + if (json_object_setn_nocheck(object, key, key_len, value)) { + res = -1; + break; + } + } + } + + hashtable_del(parents, loop_key, loop_key_len); + + return res; +} + +int json_object_update_recursive(json_t *object, json_t *other) { + int res; + hashtable_t parents_set; + + if (hashtable_init(&parents_set)) + return -1; + res = do_object_update_recursive(object, other, &parents_set); + hashtable_close(&parents_set); + + return res; +} + +void *json_object_iter(json_t *json) { + 
json_object_t *object; + + if (!json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter(&object->hashtable); +} + +void *json_object_iter_at(json_t *json, const char *key) { + json_object_t *object; + + if (!key || !json_is_object(json)) + return NULL; + + object = json_to_object(json); + return hashtable_iter_at(&object->hashtable, key, strlen(key)); +} + +void *json_object_iter_next(json_t *json, void *iter) { + json_object_t *object; + + if (!json_is_object(json) || iter == NULL) + return NULL; + + object = json_to_object(json); + return hashtable_iter_next(&object->hashtable, iter); +} + +const char *json_object_iter_key(void *iter) { + if (!iter) + return NULL; + + return hashtable_iter_key(iter); +} + +size_t json_object_iter_key_len(void *iter) { + if (!iter) + return 0; + + return hashtable_iter_key_len(iter); +} + +json_t *json_object_iter_value(void *iter) { + if (!iter) + return NULL; + + return (json_t *)hashtable_iter_value(iter); +} + +int json_object_iter_set_new(json_t *json, void *iter, json_t *value) { + if (!json_is_object(json) || !iter || !value) { + json_decref(value); + return -1; + } + + hashtable_iter_set(iter, value); + return 0; +} + +void *json_object_key_to_iter(const char *key) { + if (!key) + return NULL; + + return hashtable_key_to_iter(key); +} + +static int json_object_equal(const json_t *object1, const json_t *object2) { + const char *key; + const json_t *value1, *value2; + + if (json_object_size(object1) != json_object_size(object2)) + return 0; + + json_object_foreach((json_t *)object1, key, value1) { + value2 = json_object_get(object2, key); + + if (!json_equal(value1, value2)) + return 0; + } + + return 1; +} + +static json_t *json_object_copy(json_t *object) { + json_t *result; + + const char *key; + json_t *value; + + result = json_object(); + if (!result) + return NULL; + + json_object_foreach(object, key, value) json_object_set_nocheck(result, key, value); + + return result; +} + 
+static json_t *json_object_deep_copy(const json_t *object, hashtable_t *parents) { + json_t *result; + void *iter; + char loop_key[LOOP_KEY_LEN]; + size_t loop_key_len; + + if (jsonp_loop_check(parents, object, loop_key, sizeof(loop_key), &loop_key_len)) + return NULL; + + result = json_object(); + if (!result) + goto out; + + /* Cannot use json_object_foreach because object has to be cast + non-const */ + iter = json_object_iter((json_t *)object); + while (iter) { + const char *key; + const json_t *value; + key = json_object_iter_key(iter); + value = json_object_iter_value(iter); + + if (json_object_set_new_nocheck(result, key, do_deep_copy(value, parents))) { + json_decref(result); + result = NULL; + break; + } + iter = json_object_iter_next((json_t *)object, iter); + } + +out: + hashtable_del(parents, loop_key, loop_key_len); + + return result; +} + +/*** array ***/ + +json_t *json_array(void) { + json_array_t *array = jsonp_malloc(sizeof(json_array_t)); + if (!array) + return NULL; + json_init(&array->json, JSON_ARRAY); + + array->entries = 0; + array->size = 8; + + array->table = jsonp_malloc(array->size * sizeof(json_t *)); + if (!array->table) { + jsonp_free(array); + return NULL; + } + + return &array->json; +} + +static void json_delete_array(json_array_t *array) { + size_t i; + + for (i = 0; i < array->entries; i++) + json_decref(array->table[i]); + + jsonp_free(array->table); + jsonp_free(array); +} + +size_t json_array_size(const json_t *json) { + if (!json_is_array(json)) + return 0; + + return json_to_array(json)->entries; +} + +json_t *json_array_get(const json_t *json, size_t index) { + json_array_t *array; + if (!json_is_array(json)) + return NULL; + array = json_to_array(json); + + if (index >= array->entries) + return NULL; + + return array->table[index]; +} + +int json_array_set_new(json_t *json, size_t index, json_t *value) { + json_array_t *array; + + if (!value) + return -1; + + if (!json_is_array(json) || json == value) { + 
json_decref(value); + return -1; + } + array = json_to_array(json); + + if (index >= array->entries) { + json_decref(value); + return -1; + } + + json_decref(array->table[index]); + array->table[index] = value; + + return 0; +} + +static void array_move(json_array_t *array, size_t dest, size_t src, size_t count) { + memmove(&array->table[dest], &array->table[src], count * sizeof(json_t *)); +} + +static void array_copy(json_t **dest, size_t dpos, json_t **src, size_t spos, + size_t count) { + memcpy(&dest[dpos], &src[spos], count * sizeof(json_t *)); +} + +static json_t **json_array_grow(json_array_t *array, size_t amount, int copy) { + size_t new_size; + json_t **old_table, **new_table; + + if (array->entries + amount <= array->size) + return array->table; + + old_table = array->table; + + new_size = max(array->size + amount, array->size * 2); + new_table = jsonp_malloc(new_size * sizeof(json_t *)); + if (!new_table) + return NULL; + + array->size = new_size; + array->table = new_table; + + if (copy) { + array_copy(array->table, 0, old_table, 0, array->entries); + jsonp_free(old_table); + return array->table; + } + + return old_table; +} + +int json_array_append_new(json_t *json, json_t *value) { + json_array_t *array; + + if (!value) + return -1; + + if (!json_is_array(json) || json == value) { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if (!json_array_grow(array, 1, 1)) { + json_decref(value); + return -1; + } + + array->table[array->entries] = value; + array->entries++; + + return 0; +} + +int json_array_insert_new(json_t *json, size_t index, json_t *value) { + json_array_t *array; + json_t **old_table; + + if (!value) + return -1; + + if (!json_is_array(json) || json == value) { + json_decref(value); + return -1; + } + array = json_to_array(json); + + if (index > array->entries) { + json_decref(value); + return -1; + } + + old_table = json_array_grow(array, 1, 0); + if (!old_table) { + json_decref(value); + return -1; + } + + if 
(old_table != array->table) { + array_copy(array->table, 0, old_table, 0, index); + array_copy(array->table, index + 1, old_table, index, array->entries - index); + jsonp_free(old_table); + } else + array_move(array, index + 1, index, array->entries - index); + + array->table[index] = value; + array->entries++; + + return 0; +} + +int json_array_remove(json_t *json, size_t index) { + json_array_t *array; + + if (!json_is_array(json)) + return -1; + array = json_to_array(json); + + if (index >= array->entries) + return -1; + + json_decref(array->table[index]); + + /* If we're removing the last element, nothing has to be moved */ + if (index < array->entries - 1) + array_move(array, index, index + 1, array->entries - index - 1); + + array->entries--; + + return 0; +} + +int json_array_clear(json_t *json) { + json_array_t *array; + size_t i; + + if (!json_is_array(json)) + return -1; + array = json_to_array(json); + + for (i = 0; i < array->entries; i++) + json_decref(array->table[i]); + + array->entries = 0; + return 0; +} + +int json_array_extend(json_t *json, json_t *other_json) { + json_array_t *array, *other; + size_t i; + + if (!json_is_array(json) || !json_is_array(other_json)) + return -1; + array = json_to_array(json); + other = json_to_array(other_json); + + if (!json_array_grow(array, other->entries, 1)) + return -1; + + for (i = 0; i < other->entries; i++) + json_incref(other->table[i]); + + array_copy(array->table, array->entries, other->table, 0, other->entries); + + array->entries += other->entries; + return 0; +} + +static int json_array_equal(const json_t *array1, const json_t *array2) { + size_t i, size; + + size = json_array_size(array1); + if (size != json_array_size(array2)) + return 0; + + for (i = 0; i < size; i++) { + json_t *value1, *value2; + + value1 = json_array_get(array1, i); + value2 = json_array_get(array2, i); + + if (!json_equal(value1, value2)) + return 0; + } + + return 1; +} + +static json_t *json_array_copy(json_t *array) { + 
json_t *result; + size_t i; + + result = json_array(); + if (!result) + return NULL; + + for (i = 0; i < json_array_size(array); i++) + json_array_append(result, json_array_get(array, i)); + + return result; +} + +static json_t *json_array_deep_copy(const json_t *array, hashtable_t *parents) { + json_t *result; + size_t i; + char loop_key[LOOP_KEY_LEN]; + size_t loop_key_len; + + if (jsonp_loop_check(parents, array, loop_key, sizeof(loop_key), &loop_key_len)) + return NULL; + + result = json_array(); + if (!result) + goto out; + + for (i = 0; i < json_array_size(array); i++) { + if (json_array_append_new(result, + do_deep_copy(json_array_get(array, i), parents))) { + json_decref(result); + result = NULL; + break; + } + } + +out: + hashtable_del(parents, loop_key, loop_key_len); + + return result; +} + +/*** string ***/ + +static json_t *string_create(const char *value, size_t len, int own) { + char *v; + json_string_t *string; + + if (!value) + return NULL; + + if (own) + v = (char *)value; + else { + v = jsonp_strndup(value, len); + if (!v) + return NULL; + } + + string = jsonp_malloc(sizeof(json_string_t)); + if (!string) { + jsonp_free(v); + return NULL; + } + json_init(&string->json, JSON_STRING); + string->value = v; + string->length = len; + + return &string->json; +} + +json_t *json_string_nocheck(const char *value) { + if (!value) + return NULL; + + return string_create(value, strlen(value), 0); +} + +json_t *json_stringn_nocheck(const char *value, size_t len) { + return string_create(value, len, 0); +} + +/* this is private; "steal" is not a public API concept */ +json_t *jsonp_stringn_nocheck_own(const char *value, size_t len) { + return string_create(value, len, 1); +} + +json_t *json_string(const char *value) { + if (!value) + return NULL; + + return json_stringn(value, strlen(value)); +} + +json_t *json_stringn(const char *value, size_t len) { + if (!value || !utf8_check_string(value, len)) + return NULL; + + return json_stringn_nocheck(value, len); +} 
+ +const char *json_string_value(const json_t *json) { + if (!json_is_string(json)) + return NULL; + + return json_to_string(json)->value; +} + +size_t json_string_length(const json_t *json) { + if (!json_is_string(json)) + return 0; + + return json_to_string(json)->length; +} + +int json_string_set_nocheck(json_t *json, const char *value) { + if (!value) + return -1; + + return json_string_setn_nocheck(json, value, strlen(value)); +} + +int json_string_setn_nocheck(json_t *json, const char *value, size_t len) { + char *dup; + json_string_t *string; + + if (!json_is_string(json) || !value) + return -1; + + dup = jsonp_strndup(value, len); + if (!dup) + return -1; + + string = json_to_string(json); + jsonp_free(string->value); + string->value = dup; + string->length = len; + + return 0; +} + +int json_string_set(json_t *json, const char *value) { + if (!value) + return -1; + + return json_string_setn(json, value, strlen(value)); +} + +int json_string_setn(json_t *json, const char *value, size_t len) { + if (!value || !utf8_check_string(value, len)) + return -1; + + return json_string_setn_nocheck(json, value, len); +} + +static void json_delete_string(json_string_t *string) { + jsonp_free(string->value); + jsonp_free(string); +} + +static int json_string_equal(const json_t *string1, const json_t *string2) { + json_string_t *s1, *s2; + + s1 = json_to_string(string1); + s2 = json_to_string(string2); + return s1->length == s2->length && !memcmp(s1->value, s2->value, s1->length); +} + +static json_t *json_string_copy(const json_t *string) { + json_string_t *s; + + s = json_to_string(string); + return json_stringn_nocheck(s->value, s->length); +} + +json_t *json_vsprintf(const char *fmt, va_list ap) { + json_t *json = NULL; + int length; + char *buf; + va_list aq; + va_copy(aq, ap); + + length = vsnprintf(NULL, 0, fmt, ap); + if (length < 0) + goto out; + if (length == 0) { + json = json_string(""); + goto out; + } + + buf = jsonp_malloc((size_t)length + 1); + if (!buf) 
+ goto out; + + vsnprintf(buf, (size_t)length + 1, fmt, aq); + if (!utf8_check_string(buf, length)) { + jsonp_free(buf); + goto out; + } + + json = jsonp_stringn_nocheck_own(buf, length); + +out: + va_end(aq); + return json; +} + +json_t *json_sprintf(const char *fmt, ...) { + json_t *result; + va_list ap; + + va_start(ap, fmt); + result = json_vsprintf(fmt, ap); + va_end(ap); + + return result; +} + +/*** integer ***/ + +json_t *json_integer(json_int_t value) { + json_integer_t *integer = jsonp_malloc(sizeof(json_integer_t)); + if (!integer) + return NULL; + json_init(&integer->json, JSON_INTEGER); + + integer->value = value; + return &integer->json; +} + +json_int_t json_integer_value(const json_t *json) { + if (!json_is_integer(json)) + return 0; + + return json_to_integer(json)->value; +} + +int json_integer_set(json_t *json, json_int_t value) { + if (!json_is_integer(json)) + return -1; + + json_to_integer(json)->value = value; + + return 0; +} + +static void json_delete_integer(json_integer_t *integer) { jsonp_free(integer); } + +static int json_integer_equal(const json_t *integer1, const json_t *integer2) { + return json_integer_value(integer1) == json_integer_value(integer2); +} + +static json_t *json_integer_copy(const json_t *integer) { + return json_integer(json_integer_value(integer)); +} + +/*** real ***/ + +json_t *json_real(double value) { + json_real_t *real; + + if (isnan(value) || isinf(value)) + return NULL; + + real = jsonp_malloc(sizeof(json_real_t)); + if (!real) + return NULL; + json_init(&real->json, JSON_REAL); + + real->value = value; + return &real->json; +} + +double json_real_value(const json_t *json) { + if (!json_is_real(json)) + return 0; + + return json_to_real(json)->value; +} + +int json_real_set(json_t *json, double value) { + if (!json_is_real(json) || isnan(value) || isinf(value)) + return -1; + + json_to_real(json)->value = value; + + return 0; +} + +static void json_delete_real(json_real_t *real) { jsonp_free(real); } + 
+static int json_real_equal(const json_t *real1, const json_t *real2) { + return json_real_value(real1) == json_real_value(real2); +} + +static json_t *json_real_copy(const json_t *real) { + return json_real(json_real_value(real)); +} + +/*** number ***/ + +double json_number_value(const json_t *json) { + if (json_is_integer(json)) + return (double)json_integer_value(json); + else if (json_is_real(json)) + return json_real_value(json); + else + return 0.0; +} + +/*** simple values ***/ + +json_t *json_true(void) { + static json_t the_true = {JSON_TRUE, (size_t)-1}; + return &the_true; +} + +json_t *json_false(void) { + static json_t the_false = {JSON_FALSE, (size_t)-1}; + return &the_false; +} + +json_t *json_null(void) { + static json_t the_null = {JSON_NULL, (size_t)-1}; + return &the_null; +} + +/*** deletion ***/ + +void json_delete(json_t *json) { + if (!json) + return; + + switch (json_typeof(json)) { + case JSON_OBJECT: + json_delete_object(json_to_object(json)); + break; + case JSON_ARRAY: + json_delete_array(json_to_array(json)); + break; + case JSON_STRING: + json_delete_string(json_to_string(json)); + break; + case JSON_INTEGER: + json_delete_integer(json_to_integer(json)); + break; + case JSON_REAL: + json_delete_real(json_to_real(json)); + break; + default: + return; + } + + /* json_delete is not called for true, false or null */ +} + +/*** equality ***/ + +int json_equal(const json_t *json1, const json_t *json2) { + if (!json1 || !json2) + return 0; + + if (json_typeof(json1) != json_typeof(json2)) + return 0; + + /* this covers true, false and null as they are singletons */ + if (json1 == json2) + return 1; + + switch (json_typeof(json1)) { + case JSON_OBJECT: + return json_object_equal(json1, json2); + case JSON_ARRAY: + return json_array_equal(json1, json2); + case JSON_STRING: + return json_string_equal(json1, json2); + case JSON_INTEGER: + return json_integer_equal(json1, json2); + case JSON_REAL: + return json_real_equal(json1, json2); + 
default: + return 0; + } +} + +/*** copying ***/ + +json_t *json_copy(json_t *json) { + if (!json) + return NULL; + + switch (json_typeof(json)) { + case JSON_OBJECT: + return json_object_copy(json); + case JSON_ARRAY: + return json_array_copy(json); + case JSON_STRING: + return json_string_copy(json); + case JSON_INTEGER: + return json_integer_copy(json); + case JSON_REAL: + return json_real_copy(json); + case JSON_TRUE: + case JSON_FALSE: + case JSON_NULL: + return json; + default: + return NULL; + } +} + +json_t *json_deep_copy(const json_t *json) { + json_t *res; + hashtable_t parents_set; + + if (hashtable_init(&parents_set)) + return NULL; + res = do_deep_copy(json, &parents_set); + hashtable_close(&parents_set); + + return res; +} + +json_t *do_deep_copy(const json_t *json, hashtable_t *parents) { + if (!json) + return NULL; + + switch (json_typeof(json)) { + case JSON_OBJECT: + return json_object_deep_copy(json, parents); + case JSON_ARRAY: + return json_array_deep_copy(json, parents); + /* for the rest of the types, deep copying doesn't differ from + shallow copying */ + case JSON_STRING: + return json_string_copy(json); + case JSON_INTEGER: + return json_integer_copy(json); + case JSON_REAL: + return json_real_copy(json); + case JSON_TRUE: + case JSON_FALSE: + case JSON_NULL: + return (json_t *)json; + default: + return NULL; + } +} diff --git a/solo-ckpool-source/src/jansson-2.14/src/version.c b/solo-ckpool-source/src/jansson-2.14/src/version.c new file mode 100644 index 0000000..f1026af --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/src/version.c @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2019 Sean Bright + * + * Jansson is free software; you can redistribute it and/or modify + * it under the terms of the MIT license. See LICENSE for details. 
+ */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#include "jansson.h" + +const char *jansson_version_str(void) { return JANSSON_VERSION; } + +int jansson_version_cmp(int major, int minor, int micro) { + int diff; + + if ((diff = JANSSON_MAJOR_VERSION - major)) { + return diff; + } + + if ((diff = JANSSON_MINOR_VERSION - minor)) { + return diff; + } + + return JANSSON_MICRO_VERSION - micro; +} diff --git a/solo-ckpool-source/src/jansson-2.14/test-driver b/solo-ckpool-source/src/jansson-2.14/test-driver new file mode 100755 index 0000000..be73b80 --- /dev/null +++ b/solo-ckpool-source/src/jansson-2.14/test-driver @@ -0,0 +1,153 @@ +#! /bin/sh +# test-driver - basic testsuite driver script. + +scriptversion=2018-03-07.03; # UTC + +# Copyright (C) 2011-2021 Free Software Foundation, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# This file is maintained in Automake, please report +# bugs to or send patches to +# . + +# Make unconditional expansion of undefined variables an error. This +# helps a lot in preventing typo-related bugs. 
+set -u + +usage_error () +{ + echo "$0: $*" >&2 + print_usage >&2 + exit 2 +} + +print_usage () +{ + cat <"$log_file" +"$@" >>"$log_file" 2>&1 +estatus=$? + +if test $enable_hard_errors = no && test $estatus -eq 99; then + tweaked_estatus=1 +else + tweaked_estatus=$estatus +fi + +case $tweaked_estatus:$expect_failure in + 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; + 0:*) col=$grn res=PASS recheck=no gcopy=no;; + 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; + 99:*) col=$mgn res=ERROR recheck=yes gcopy=yes;; + *:yes) col=$lgn res=XFAIL recheck=no gcopy=yes;; + *:*) col=$red res=FAIL recheck=yes gcopy=yes;; +esac + +# Report the test outcome and exit status in the logs, so that one can +# know whether the test passed or failed simply by looking at the '.log' +# file, without the need of also peaking into the corresponding '.trs' +# file (automake bug#11814). +echo "$res $test_name (exit status: $estatus)" >>"$log_file" + +# Report outcome to console. +echo "${col}${res}${std}: $test_name" + +# Register the test result, and other relevant metadata. 
+echo ":test-result: $res" > $trs_file +echo ":global-test-result: $res" >> $trs_file +echo ":recheck: $recheck" >> $trs_file +echo ":copy-in-global-log: $gcopy" >> $trs_file + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/solo-ckpool-source/src/libckpool.c b/solo-ckpool-source/src/libckpool.c new file mode 100644 index 0000000..3bfe9a5 --- /dev/null +++ b/solo-ckpool-source/src/libckpool.c @@ -0,0 +1,2258 @@ +/* + * Copyright 2014-2018,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#ifdef HAVE_LINUX_UN_H +#include +#else +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "libckpool.h" +#include "sha2.h" +#include "utlist.h" + +#ifndef UNIX_PATH_MAX +#define UNIX_PATH_MAX 108 +#endif + +/* We use a weak function as a simple printf within the library that can be + * overridden by however the outside executable wishes to do its logging. */ +void __attribute__((weak)) logmsg(int __maybe_unused loglevel, const char *fmt, ...) 
+{ + va_list ap; + char *buf; + + va_start(ap, fmt); + VASPRINTF(&buf, fmt, ap); + va_end(ap); + + printf("%s\n", buf); + free(buf); +} + +void rename_proc(const char *name) +{ + char buf[16]; + + snprintf(buf, 15, "ckp@%s", name); + buf[15] = '\0'; + prctl(PR_SET_NAME, buf, 0, 0, 0); +} + +void create_pthread(pthread_t *thread, void *(*start_routine)(void *), void *arg) +{ + int ret = pthread_create(thread, NULL, start_routine, arg); + + if (unlikely(ret)) + quit(1, "Failed to pthread_create"); +} + +void join_pthread(pthread_t thread) +{ + if (!pthread_kill(thread, 0)) + pthread_join(thread, NULL); +} + +struct ck_completion { + sem_t sem; + void (*fn)(void *fnarg); + void *fnarg; +}; + +static void *completion_thread(void *arg) +{ + struct ck_completion *ckc = (struct ck_completion *)arg; + + ckc->fn(ckc->fnarg); + cksem_post(&ckc->sem); + + return NULL; +} + +bool ck_completion_timeout(void *fn, void *fnarg, int timeout) +{ + struct ck_completion ckc; + pthread_t pthread; + bool ret = false; + + cksem_init(&ckc.sem); + ckc.fn = fn; + ckc.fnarg = fnarg; + + pthread_create(&pthread, NULL, completion_thread, (void *)&ckc); + + ret = cksem_mswait(&ckc.sem, timeout); + if (!ret) + pthread_join(pthread, NULL); + else + pthread_cancel(pthread); + return !ret; +} + +int _cond_wait(pthread_cond_t *cond, mutex_t *lock, const char *file, const char *func, const int line) +{ + int ret; + + ret = pthread_cond_wait(cond, &lock->mutex); + lock->file = file; + lock->func = func; + lock->line = line; + return ret; +} + +int _cond_timedwait(pthread_cond_t *cond, mutex_t *lock, const struct timespec *abstime, const char *file, const char *func, const int line) +{ + int ret; + + ret = pthread_cond_timedwait(cond, &lock->mutex, abstime); + lock->file = file; + lock->func = func; + lock->line = line; + return ret; +} + + +int _mutex_timedlock(mutex_t *lock, int timeout, const char *file, const char *func, const int line) +{ + tv_t now; + ts_t abs; + int ret; + + tv_time(&now); + 
tv_to_ts(&abs, &now); + abs.tv_sec += timeout; + + ret = pthread_mutex_timedlock(&lock->mutex, &abs); + if (!ret) { + lock->file = file; + lock->func = func; + lock->line = line; + } + + return ret; +} + +/* Make every locking attempt warn if we're unable to get the lock for more + * than 10 seconds and fail if we can't get it for longer than a minute. */ +void _mutex_lock(mutex_t *lock, const char *file, const char *func, const int line) +{ + int ret, retries = 0; + +retry: + ret = _mutex_timedlock(lock, 10, file, func, line); + if (unlikely(ret)) { + if (likely(ret == ETIMEDOUT)) { + LOGERR("WARNING: Prolonged mutex lock contention from %s %s:%d, held by %s %s:%d", + file, func, line, lock->file, lock->func, lock->line); + if (++retries < 6) + goto retry; + quitfrom(1, file, func, line, "FAILED TO GRAB MUTEX!"); + } + quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK!"); + } +} + +/* Does not unset lock->file/func/line since they're only relevant when the lock is held */ +void _mutex_unlock(mutex_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_mutex_unlock(&lock->mutex))) + quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK!"); +} + +int _mutex_trylock(mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) +{ + int ret; + + ret = pthread_mutex_trylock(&lock->mutex); + if (!ret) { + lock->file = file; + lock->func = func; + lock->line = line; + } + return ret; +} + +void mutex_destroy(mutex_t *lock) +{ + pthread_mutex_destroy(&lock->mutex); +} + + +static int wr_timedlock(pthread_rwlock_t *lock, int timeout) +{ + tv_t now; + ts_t abs; + int ret; + + tv_time(&now); + tv_to_ts(&abs, &now); + abs.tv_sec += timeout; + + ret = pthread_rwlock_timedwrlock(lock, &abs); + + return ret; +} + +void _wr_lock(rwlock_t *lock, const char *file, const char *func, const int line) +{ + int ret, retries = 0; + +retry: + ret = wr_timedlock(&lock->rwlock, 10); + if 
(unlikely(ret)) { + if (likely(ret == ETIMEDOUT)) { + LOGERR("WARNING: Prolonged write lock contention from %s %s:%d, held by %s %s:%d", + file, func, line, lock->file, lock->func, lock->line); + if (++retries < 6) + goto retry; + quitfrom(1, file, func, line, "FAILED TO GRAB WRITE LOCK!"); + } + quitfrom(1, file, func, line, "WTF ERROR ON WRITE LOCK!"); + } + lock->file = file; + lock->func = func; + lock->line = line; +} + +int _wr_trylock(rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) +{ + int ret = pthread_rwlock_trywrlock(&lock->rwlock); + + if (!ret) { + lock->file = file; + lock->func = func; + lock->line = line; + } + return ret; +} + +static int rd_timedlock(pthread_rwlock_t *lock, int timeout) +{ + tv_t now; + ts_t abs; + int ret; + + tv_time(&now); + tv_to_ts(&abs, &now); + abs.tv_sec += timeout; + + ret = pthread_rwlock_timedrdlock(lock, &abs); + + return ret; +} + +void _rd_lock(rwlock_t *lock, const char *file, const char *func, const int line) +{ + int ret, retries = 0; + +retry: + ret = rd_timedlock(&lock->rwlock, 10); + if (unlikely(ret)) { + if (likely(ret == ETIMEDOUT)) { + LOGERR("WARNING: Prolonged read lock contention from %s %s:%d, held by %s %s:%d", + file, func, line, lock->file, lock->func, lock->line); + if (++retries < 6) + goto retry; + quitfrom(1, file, func, line, "FAILED TO GRAB READ LOCK!"); + } + quitfrom(1, file, func, line, "WTF ERROR ON READ LOCK!"); + } + lock->file = file; + lock->func = func; + lock->line = line; +} + +void _rw_unlock(rwlock_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_rwlock_unlock(&lock->rwlock))) + quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK!"); +} + +void _rd_unlock(rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, file, func, line); +} + +void _wr_unlock(rwlock_t *lock, const char *file, const char *func, const int line) +{ + _rw_unlock(lock, 
file, func, line); +} + +void _mutex_init(mutex_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_mutex_init(&lock->mutex, NULL))) + quitfrom(1, file, func, line, "Failed to pthread_mutex_init"); +} + +void _rwlock_init(rwlock_t *lock, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_rwlock_init(&lock->rwlock, NULL))) + quitfrom(1, file, func, line, "Failed to pthread_rwlock_init"); +} + + +void _cond_init(pthread_cond_t *cond, const char *file, const char *func, const int line) +{ + if (unlikely(pthread_cond_init(cond, NULL))) + quitfrom(1, file, func, line, "Failed to pthread_cond_init!"); +} + +void _cklock_init(cklock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_init(&lock->mutex, file, func, line); + _rwlock_init(&lock->rwlock, file, func, line); +} + + +/* Read lock variant of cklock. Cannot be promoted. */ +void _ck_rlock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_lock(&lock->mutex, file, func, line); + _rd_lock(&lock->rwlock, file, func, line); + _mutex_unlock(&lock->mutex, file, func, line); +} + +/* Write lock variant of cklock */ +void _ck_wlock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _mutex_lock(&lock->mutex, file, func, line); + _wr_lock(&lock->rwlock, file, func, line); +} + +/* Downgrade write variant to a read lock */ +void _ck_dwlock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock(&lock->rwlock, file, func, line); + _rd_lock(&lock->rwlock, file, func, line); + _mutex_unlock(&lock->mutex, file, func, line); +} + +/* Demote a write variant to an intermediate variant */ +void _ck_dwilock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock(&lock->rwlock, file, func, line); +} + +void _ck_runlock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _rd_unlock(&lock->rwlock, file, func, line); +} + +void 
_ck_wunlock(cklock_t *lock, const char *file, const char *func, const int line) +{ + _wr_unlock(&lock->rwlock, file, func, line); + _mutex_unlock(&lock->mutex, file, func, line); +} + +void cklock_destroy(cklock_t *lock) +{ + pthread_rwlock_destroy(&lock->rwlock.rwlock); + pthread_mutex_destroy(&lock->mutex.mutex); +} + + +void _cksem_init(sem_t *sem, const char *file, const char *func, const int line) +{ + int ret; + if ((ret = sem_init(sem, 0, 0))) + quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); +} + +void _cksem_post(sem_t *sem, const char *file, const char *func, const int line) +{ + if (unlikely(sem_post(sem))) + quitfrom(1, file, func, line, "Failed to sem_post errno=%d sem=0x%p", errno, sem); +} + +void _cksem_wait(sem_t *sem, const char *file, const char *func, const int line) +{ + if (unlikely(sem_wait(sem))) { + if (errno == EINTR) + return; + quitfrom(1, file, func, line, "Failed to sem_wait errno=%d sem=0x%p", errno, sem); + } +} + +int _cksem_trywait(sem_t *sem, const char *file, const char *func, const int line) +{ + int ret = sem_trywait(sem); + + if (unlikely(ret && errno != EAGAIN && errno != EINTR)) + quitfrom(1, file, func, line, "Failed to sem_trywait errno=%d sem=0x%p", errno, sem); + return ret; +} + +int _cksem_mswait(sem_t *sem, int ms, const char *file, const char *func, const int line) +{ + ts_t abs_timeout, ts_now; + tv_t tv_now; + int ret; + + tv_time(&tv_now); + tv_to_ts(&ts_now, &tv_now); + ms_to_ts(&abs_timeout, ms); + timeraddspec(&abs_timeout, &ts_now); + ret = sem_timedwait(sem, &abs_timeout); + + if (ret) { + if (likely(errno == ETIMEDOUT)) + return ETIMEDOUT; + if (errno == EINTR) + return EINTR; + quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d sem=0x%p", errno, sem); + } + return 0; +} + +void _cksem_destroy(sem_t *sem, const char *file, const char *func, const int line) +{ + + if (unlikely(sem_destroy(sem))) + quitfrom(1, file, func, line, "Failed to sem_destroy errno=%d 
sem=0x%p", errno, sem); +} + +/* Extract just the url and port information from a url string, allocating + * heap memory for sockaddr_url and sockaddr_port. */ +bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) +{ + char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; + char *url_address, *port, *tmp; + int url_len, port_len = 0; + size_t hlen; + + if (!url) { + LOGWARNING("Null length url string passed to extract_sockaddr"); + return false; + } + url_begin = strstr(url, "//"); + if (!url_begin) + url_begin = url; + else + url_begin += 2; + + /* Look for numeric ipv6 entries */ + ipv6_begin = strstr(url_begin, "["); + ipv6_end = strstr(url_begin, "]"); + if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) + url_end = strstr(ipv6_end, ":"); + else + url_end = strstr(url_begin, ":"); + if (url_end) { + url_len = url_end - url_begin; + port_len = strlen(url_begin) - url_len - 1; + if (port_len < 1) + return false; + port_start = url_end + 1; + } else + url_len = strlen(url_begin); + + /* Get rid of the [] */ + if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin){ + url_len -= 2; + url_begin++; + } + + if (url_len < 1) { + LOGWARNING("Null length URL passed to extract_sockaddr"); + return false; + } + + hlen = url_len + 1; + url_address = ckalloc(hlen); + sprintf(url_address, "%.*s", url_len, url_begin); + + port = ckalloc(8); + if (port_len) { + char *slash; + + snprintf(port, 6, "%.*s", port_len, port_start); + slash = strchr(port, '/'); + if (slash) + *slash = '\0'; + } else + strcpy(port, "80"); + + /* + * This function may be called with sockaddr_* already set as it may + * be getting updated so we need to free the old entries safely. 
+ * Use a temporary variable so they never dereference */ + if (*sockaddr_port && !safecmp(*sockaddr_port, port)) + free(port); + else { + tmp = *sockaddr_port; + *sockaddr_port = port; + free(tmp); + } + if (*sockaddr_url && !safecmp(*sockaddr_url, url_address)) + free(url_address); + else { + tmp = *sockaddr_url; + *sockaddr_url = url_address; + free(tmp); + } + + return true; +} + +/* Convert a sockaddr structure into a url and port. URL should be a string of + * INET6_ADDRSTRLEN size, port at least a string of 6 bytes */ +bool url_from_sockaddr(const struct sockaddr *addr, char *url, char *port) +{ + int port_no = 0; + + switch(addr->sa_family) { + const struct sockaddr_in *inet4_in; + const struct sockaddr_in6 *inet6_in; + + case AF_INET: + inet4_in = (struct sockaddr_in *)addr; + inet_ntop(AF_INET, &inet4_in->sin_addr, url, INET6_ADDRSTRLEN); + port_no = htons(inet4_in->sin_port); + break; + case AF_INET6: + inet6_in = (struct sockaddr_in6 *)addr; + inet_ntop(AF_INET6, &inet6_in->sin6_addr, url, INET6_ADDRSTRLEN); + port_no = htons(inet6_in->sin6_port); + break; + default: + return false; + } + sprintf(port, "%d", port_no); + return true; +} + +/* Helper for getaddrinfo with the same API that retries while getting + * EAI_AGAIN error */ +static int addrgetinfo(const char *node, const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + int ret; + + do { + ret = getaddrinfo(node, service, hints, res); + } while (ret == EAI_AGAIN); + + return ret; +} + + +bool addrinfo_from_url(const char *url, const char *port, struct addrinfo *addrinfo) +{ + struct addrinfo *servinfo, hints; + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + servinfo = addrinfo; + if (addrgetinfo(url, port, &hints, &servinfo) != 0) + return false; + if (!servinfo) + return false; + memcpy(addrinfo, servinfo->ai_addr, servinfo->ai_addrlen); + freeaddrinfo(servinfo); + return true; +} + +/* Extract a 
resolved url and port from a serverurl string. newurl must be + * a string of at least INET6_ADDRSTRLEN and newport at least 6 bytes. */ +bool url_from_serverurl(char *serverurl, char *newurl, char *newport) +{ + char *url = NULL, *port = NULL; + struct addrinfo addrinfo; + bool ret = false; + + if (!extract_sockaddr(serverurl, &url, &port)) { + LOGWARNING("Failed to extract server address from %s", serverurl); + goto out; + } + if (!addrinfo_from_url(url, port, &addrinfo)) { + LOGWARNING("Failed to extract addrinfo from url %s:%s", url, port); + goto out; + } + if (!url_from_sockaddr((const struct sockaddr *)&addrinfo, newurl, newport)) { + LOGWARNING("Failed to extract url from sockaddr for original url: %s:%s", + url, port); + goto out; + } + ret = true; +out: + dealloc(url); + dealloc(port); + return ret; +} + +/* Convert a socket into a url and port. URL should be a string of + * INET6_ADDRSTRLEN size, port at least a string of 6 bytes */ +bool url_from_socket(const int sockd, char *url, char *port) +{ + struct sockaddr_storage storage; + socklen_t addrlen = sizeof(struct sockaddr_storage); + struct sockaddr *addr = (struct sockaddr *)&storage; + + if (sockd < 1) + return false; + if (getsockname(sockd, addr, &addrlen)) + return false; + if (!url_from_sockaddr(addr, url, port)) + return false; + return true; +} + + +void keep_sockalive(int fd) +{ + const int tcp_one = 1; + const int tcp_keepidle = 45; + const int tcp_keepintvl = 30; + + setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); + setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); + setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); + setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); + setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); +} + +void nolinger_socket(int fd) +{ + const struct linger so_linger = { 1, 0 }; + + setsockopt(fd, SOL_SOCKET, SO_LINGER, &so_linger, 
sizeof(so_linger)); +} + +void noblock_socket(int fd) +{ + int flags = fcntl(fd, F_GETFL, 0); + + fcntl(fd, F_SETFL, O_NONBLOCK | flags); +} + +void block_socket(int fd) +{ + int flags = fcntl(fd, F_GETFL, 0); + + fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); +} + +void _close(int *fd, const char *file, const char *func, const int line) +{ + int sockd; + + if (*fd < 0) + return; + sockd = *fd; + LOGDEBUG("Closing file handle %d", sockd); + *fd = -1; + if (unlikely(close(sockd))) { + LOGWARNING("Close of fd %d failed with errno %d:%s from %s %s:%d", + sockd, errno, strerror(errno), file, func, line); + } +} + +int bind_socket(char *url, char *port) +{ + struct addrinfo servinfobase, *servinfo, hints, *p; + int ret, sockd = -1; + const int on = 1; + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + servinfo = &servinfobase; + + if (addrgetinfo(url, port, &hints, &servinfo) != 0) { + LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port); + return sockd; + } + for (p = servinfo; p != NULL; p = p->ai_next) { + sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); + if (sockd > 0) + break; + } + if (sockd < 1 || p == NULL) { + LOGWARNING("Failed to open socket for %s:%s", url, port); + goto out; + } + setsockopt(sockd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); + ret = bind(sockd, p->ai_addr, p->ai_addrlen); + if (ret < 0) { + LOGWARNING("Failed to bind socket for %s:%s", url, port); + Close(sockd); + goto out; + } + +out: + freeaddrinfo(servinfo); + return sockd; +} + +int connect_socket(char *url, char *port) +{ + struct addrinfo servinfobase, *servinfo, hints, *p; + int sockd = -1; + + memset(&hints, 0, sizeof(struct addrinfo)); + hints.ai_family = AF_UNSPEC; + hints.ai_socktype = SOCK_STREAM; + memset(&servinfobase, 0, sizeof(struct addrinfo)); + servinfo = &servinfobase; + + if (addrgetinfo(url, port, &hints, &servinfo) != 0) { + LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port); 
+ goto out; + } + + for (p = servinfo; p != NULL; p = p->ai_next) { + sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); + if (sockd == -1) { + LOGDEBUG("Failed socket"); + continue; + } + + /* Iterate non blocking over entries returned by getaddrinfo + * to cope with round robin DNS entries, finding the first one + * we can connect to quickly. */ + noblock_socket(sockd); + if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { + int selret; + + if (!sock_connecting()) { + Close(sockd); + LOGDEBUG("Failed sock connect"); + continue; + } + selret = wait_write_select(sockd, 5); + if (selret > 0) { + socklen_t len; + int err, n; + + len = sizeof(err); + n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); + if (!n && !err) { + LOGDEBUG("Succeeded delayed connect"); + block_socket(sockd); + break; + } + } + Close(sockd); + LOGDEBUG("Select timeout/failed connect"); + continue; + } + LOGDEBUG("Succeeded immediate connect"); + if (sockd >= 0) + block_socket(sockd); + + break; + } + if (p == NULL) { + LOGINFO("Failed to connect to %s:%s", url, port); + sockd = -1; + } + freeaddrinfo(servinfo); +out: + return sockd; +} + +/* Measure the minimum round trip time it should take to get to a url by attempting + * to connect to what should be a closed socket on port 1042. This is a blocking + * function so can take many seconds. 
Returns 0 on failure */
+int round_trip(char *url)
+{
+	struct addrinfo servinfobase, *p, hints;
+	int sockd = -1, ret = 0, i, diff;
+	tv_t start_tv, end_tv;
+	char port[] = "1042";
+
+	memset(&hints, 0, sizeof(struct addrinfo));
+	hints.ai_family = AF_UNSPEC;
+	hints.ai_socktype = SOCK_STREAM;
+	memset(&servinfobase, 0, sizeof(struct addrinfo));
+	p = &servinfobase;
+
+	if (addrgetinfo(url, port, &hints, &p) != 0) {
+		LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port);
+		return ret;
+	}
+	/* This function should be called only on already-resolved IP addresses so
+	 * we only need to use the first result from servinfobase */
+	sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol);
+	if (sockd == -1) {
+		LOGERR("Failed socket");
+		goto out;
+	}
+	/* Attempt to connect 5 times to what should be a closed port and measure
+	 * the time it takes to get a refused message */
+	for (i = 0; i < 5; i++) {
+		tv_time(&start_tv);
+		if (!connect(sockd, p->ai_addr, p->ai_addrlen) || errno != ECONNREFUSED) {
+			LOGINFO("Unable to get round trip due to %s:%s connect not being refused",
+				url, port);
+			goto out;
+		}
+		tv_time(&end_tv);
+		diff = ms_tvdiff(&end_tv, &start_tv);
+		if (!ret || diff < ret)
+			ret = diff;
+	}
+	/* Clamp the returned minimum itself; the old code assigned the scratch
+	 * diff variable (a dead store) so the clamp never took effect */
+	if (ret > 500) {
+		LOGINFO("Round trip to %s:%s greater than 500ms at %d, clamping to 500",
+			url, port, ret);
+		ret = 500;
+	}
+	LOGINFO("Minimum round trip to %s:%s calculated as %dms", url, port, ret);
+out:
+	Close(sockd);
+	freeaddrinfo(p);
+	return ret;
+}
+
+int write_socket(int fd, const void *buf, size_t nbyte)
+{
+	int ret;
+
+	ret = wait_write_select(fd, 5);
+	if (ret < 1) {
+		if (!ret)
+			LOGNOTICE("Select timed out in write_socket");
+		else
+			LOGNOTICE("Select failed in write_socket");
+		goto out;
+	}
+	ret = write_length(fd, buf, nbyte);
+	if (ret < 0)
+		LOGNOTICE("Failed to write in write_socket");
+out:
+	return ret;
+}
+
+void empty_socket(int fd)
+{
+	char buf[PAGESIZE];
+	int ret;
+
+	if (fd < 1)
+		return;
+
+	do {
+		ret = recv(fd,
buf, PAGESIZE - 1, MSG_DONTWAIT); + if (ret > 0) { + buf[ret] = 0; + LOGDEBUG("Discarding: %s", buf); + } + } while (ret > 0); +} + +void _close_unix_socket(int *sockd, const char *server_path) +{ + LOGDEBUG("Closing unix socket %d %s", *sockd, server_path); + _Close(sockd); +} + +int _open_unix_server(const char *server_path, const char *file, const char *func, const int line) +{ + mode_t mode = S_IRWXU | S_IRWXG; // Owner+Group RWX + struct sockaddr_un serveraddr; + int sockd = -1, len, ret; + struct stat buf; + + if (likely(server_path)) { + len = strlen(server_path); + if (unlikely(len < 1 || len >= UNIX_PATH_MAX)) { + LOGERR("Invalid server path length %d in open_unix_server", len); + goto out; + } + } else { + LOGERR("Null passed as server_path to open_unix_server"); + goto out; + } + + if (!stat(server_path, &buf)) { + if ((buf.st_mode & S_IFMT) == S_IFSOCK) { + ret = unlink(server_path); + if (ret) { + LOGERR("Unlink of %s failed in open_unix_server", server_path); + goto out; + } + LOGDEBUG("Unlinked %s to recreate socket", server_path); + } else { + LOGWARNING("%s already exists and is not a socket, not removing", + server_path); + goto out; + } + } + + sockd = socket(AF_UNIX, SOCK_STREAM, 0); + if (unlikely(sockd < 0)) { + LOGERR("Failed to open socket in open_unix_server"); + goto out; + } + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sun_family = AF_UNIX; + strcpy(serveraddr.sun_path, server_path); + + ret = bind(sockd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)); + if (unlikely(ret < 0)) { + LOGERR("Failed to bind to socket in open_unix_server"); + close_unix_socket(sockd, server_path); + sockd = -1; + goto out; + } + + ret = chmod(server_path, mode); + if (unlikely(ret < 0)) + LOGERR("Failed to set mode in open_unix_server - continuing"); + + ret = listen(sockd, SOMAXCONN); + if (unlikely(ret < 0)) { + LOGERR("Failed to listen to socket in open_unix_server"); + close_unix_socket(sockd, server_path); + sockd = -1; + goto out; + } 
+ + LOGDEBUG("Opened server path %s successfully on socket %d", server_path, sockd); +out: + if (unlikely(sockd == -1)) + LOGERR("Failure in open_unix_server from %s %s:%d", file, func, line); + return sockd; +} + +int _open_unix_client(const char *server_path, const char *file, const char *func, const int line) +{ + struct sockaddr_un serveraddr; + int sockd = -1, len, ret; + + if (likely(server_path)) { + len = strlen(server_path); + if (unlikely(len < 1 || len >= UNIX_PATH_MAX)) { + LOGERR("Invalid server path length %d in open_unix_client", len); + goto out; + } + } else { + LOGERR("Null passed as server_path to open_unix_client"); + goto out; + } + + sockd = socket(AF_UNIX, SOCK_STREAM, 0); + if (unlikely(sockd < 0)) { + LOGERR("Failed to open socket in open_unix_client"); + goto out; + } + memset(&serveraddr, 0, sizeof(serveraddr)); + serveraddr.sun_family = AF_UNIX; + strcpy(serveraddr.sun_path, server_path); + + ret = connect(sockd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)); + if (unlikely(ret < 0)) { + LOGERR("Failed to bind to socket in open_unix_client"); + Close(sockd); + goto out; + } + + LOGDEBUG("Opened client path %s successfully on socket %d", server_path, sockd); +out: + if (unlikely(sockd == -1)) + LOGERR("Failure in open_unix_client from %s %s:%d", file, func, line); + return sockd; +} + +/* Wait till a socket has been closed at the other end */ +int wait_close(int sockd, int timeout) +{ + struct pollfd sfd; + int ret; + + if (unlikely(sockd < 0)) + return -1; + sfd.fd = sockd; + sfd.events = POLLRDHUP; + sfd.revents = 0; + timeout *= 1000; + ret = poll(&sfd, 1, timeout); + if (ret < 1) + return 0; + return sfd.revents & (POLLHUP | POLLRDHUP | POLLERR); +} + +/* Emulate a select read wait for high fds that select doesn't support. 
*/ +int wait_read_select(int sockd, float timeout) +{ + struct epoll_event event = {0, {NULL}}; + int epfd, ret; + + epfd = epoll_create1(EPOLL_CLOEXEC); + event.events = EPOLLIN | EPOLLRDHUP; + epoll_ctl(epfd, EPOLL_CTL_ADD, sockd, &event); + timeout *= 1000; + ret = epoll_wait(epfd, &event, 1, timeout); + close(epfd); + return ret; +} + +int read_length(int sockd, void *buf, int len) +{ + int ret, ofs = 0; + + if (unlikely(len < 1)) { + LOGWARNING("Invalid read length of %d requested in read_length", len); + return -1; + } + if (unlikely(sockd < 0)) + return -1; + while (len) { + ret = recv(sockd, buf + ofs, len, MSG_WAITALL); + if (unlikely(ret < 1)) + return -1; + ofs += ret; + len -= ret; + } + return ofs; +} + +/* Use a standard message across the unix sockets: + * 4 byte length of message as little endian encoded uint32_t followed by the + * string. Return NULL in case of failure. */ +char *_recv_unix_msg(int sockd, int timeout1, int timeout2, const char *file, const char *func, const int line) +{ + char *buf = NULL; + uint32_t msglen; + int ret, ern; + + ret = wait_read_select(sockd, timeout1); + if (unlikely(ret < 1)) { + ern = errno; + LOGERR("Select1 failed in recv_unix_msg (%d)", ern); + goto out; + } + /* Get message length */ + ret = read_length(sockd, &msglen, 4); + if (unlikely(ret < 4)) { + ern = errno; + LOGERR("Failed to read 4 byte length in recv_unix_msg (%d?)", ern); + goto out; + } + msglen = le32toh(msglen); + if (unlikely(msglen < 1 || msglen > 0x80000000)) { + LOGWARNING("Invalid message length %u sent to recv_unix_msg", msglen); + goto out; + } + ret = wait_read_select(sockd, timeout2); + if (unlikely(ret < 1)) { + ern = errno; + LOGERR("Select2 failed in recv_unix_msg (%d)", ern); + goto out; + } + buf = ckzalloc(msglen + 1); + ret = read_length(sockd, buf, msglen); + if (unlikely(ret < (int)msglen)) { + ern = errno; + LOGERR("Failed to read %u bytes in recv_unix_msg (%d?)", msglen, ern); + dealloc(buf); + } +out: + shutdown(sockd, 
SHUT_RD); + if (unlikely(!buf)) + LOGERR("Failure in recv_unix_msg from %s %s:%d", file, func, line); + return buf; +} + +/* Emulate a select write wait for high fds that select doesn't support */ +int wait_write_select(int sockd, float timeout) +{ + struct epoll_event event = {0, {NULL}}; + int epfd, ret; + + epfd = epoll_create1(EPOLL_CLOEXEC); + event.events = EPOLLOUT | EPOLLRDHUP ; + epoll_ctl(epfd, EPOLL_CTL_ADD, sockd, &event); + timeout *= 1000; + ret = epoll_wait(epfd, &event, 1, timeout); + close(epfd); + return ret; +} + +int _write_length(int sockd, const void *buf, int len, const char *file, const char *func, const int line) +{ + int ret, ofs = 0, ern; + + if (unlikely(len < 1)) { + LOGWARNING("Invalid write length of %d requested in write_length from %s %s:%d", + len, file, func, line); + return -1; + } + if (unlikely(sockd < 0)) { + ern = errno; + LOGWARNING("Attempt to write to invalidated sock in write_length from %s %s:%d", + file, func, line); + return -1; + } + while (len) { + ret = write(sockd, buf + ofs, len); + if (unlikely(ret < 0)) { + ern = errno; + LOGERR("Failed to write %d bytes in write_length (%d) from %s %s:%d", + len, ern, file, func, line); + return -1; + } + ofs += ret; + len -= ret; + } + return ofs; +} + +bool _send_unix_msg(int sockd, const char *buf, int timeout, const char *file, const char *func, const int line) +{ + uint32_t msglen, len; + bool retval = false; + int ret, ern; + + if (unlikely(sockd < 0)) { + LOGWARNING("Attempting to send unix message to invalidated sockd %d", sockd); + goto out; + } + if (unlikely(!buf)) { + LOGWARNING("Null message sent to send_unix_msg"); + goto out; + } + len = strlen(buf); + if (unlikely(!len)) { + LOGWARNING("Zero length message sent to send_unix_msg"); + goto out; + } + msglen = htole32(len); + ret = wait_write_select(sockd, timeout); + if (unlikely(ret < 1)) { + ern = errno; + LOGERR("Select1 failed in send_unix_msg (%d)", ern); + goto out; + } + ret = _write_length(sockd, &msglen, 
4, file, func, line); + if (unlikely(ret < 4)) { + LOGERR("Failed to write 4 byte length in send_unix_msg"); + goto out; + } + ret = wait_write_select(sockd, timeout); + if (unlikely(ret < 1)) { + ern = errno; + LOGERR("Select2 failed in send_unix_msg (%d)", ern); + goto out; + } + ret = _write_length(sockd, buf, len, file, func, line); + if (unlikely(ret < 0)) { + LOGERR("Failed to write %d bytes in send_unix_msg", len); + goto out; + } + retval = true; +out: + shutdown(sockd, SHUT_WR); + if (unlikely(!retval)) + LOGERR("Failure in send_unix_msg from %s %s:%d", file, func, line); + return retval; +} + +bool _send_unix_data(int sockd, const struct msghdr *msg, const char *file, const char *func, const int line) +{ + bool retval = false; + int ret; + + if (unlikely(!msg)) { + LOGWARNING("Null message sent to send_unix_data"); + goto out; + } + ret = wait_write_select(sockd, UNIX_WRITE_TIMEOUT); + if (unlikely(ret < 1)) { + LOGERR("Select1 failed in send_unix_data"); + goto out; + } + ret = sendmsg(sockd, msg, 0); + if (unlikely(ret < 1)) { + LOGERR("Failed to send in send_unix_data"); + goto out; + } + retval = true; +out: + shutdown(sockd, SHUT_WR); + if (unlikely(!retval)) + LOGERR("Failure in send_unix_data from %s %s:%d", file, func, line); + return retval; +} + +bool _recv_unix_data(int sockd, struct msghdr *msg, const char *file, const char *func, const int line) +{ + bool retval = false; + int ret; + + ret = wait_read_select(sockd, UNIX_READ_TIMEOUT); + if (unlikely(ret < 1)) { + LOGERR("Select1 failed in recv_unix_data"); + goto out; + } + ret = recvmsg(sockd, msg, MSG_WAITALL); + if (unlikely(ret < 0)) { + LOGERR("Failed to recv in recv_unix_data"); + goto out; + } + retval = true; +out: + shutdown(sockd, SHUT_RD); + if (unlikely(!retval)) + LOGERR("Failure in recv_unix_data from %s %s:%d", file, func, line); + return retval; +} + +#define CONTROLLLEN CMSG_LEN(sizeof(int)) +#define MAXLINE 4096 + +/* Send a msghdr containing fd via the unix socket sockd */ 
+bool _send_fd(int fd, int sockd, const char *file, const char *func, const int line) +{ + struct cmsghdr *cmptr = ckzalloc(CONTROLLLEN); + struct iovec iov[1]; + struct msghdr msg; + char buf[2]; + bool ret; + int *cm; + + memset(&msg, 0, sizeof(struct msghdr)); + iov[0].iov_base = buf; + iov[0].iov_len = 2; + msg.msg_iov = iov; + msg.msg_iovlen = 1; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_controllen = CONTROLLLEN; + msg.msg_control = cmptr; + cmptr->cmsg_level = SOL_SOCKET; + cmptr->cmsg_type = SCM_RIGHTS; + cmptr->cmsg_len = CONTROLLLEN; + cm = (int *)CMSG_DATA(cmptr); + *cm = fd; + buf[1] = 0; + buf[0] = 0; + ret = send_unix_data(sockd, &msg); + free(cmptr); + if (!ret) + LOGERR("Failed to send_unix_data in send_fd from %s %s:%d", file, func, line); + return ret; +} + +/* Receive an fd by reading a msghdr from the unix socket sockd */ +int _get_fd(int sockd, const char *file, const char *func, const int line) +{ + int newfd = -1; + char buf[MAXLINE]; + struct iovec iov[1]; + struct msghdr msg; + struct cmsghdr *cmptr = ckzalloc(CONTROLLLEN); + int *cm; + + memset(&msg, 0, sizeof(struct msghdr)); + iov[0].iov_base = buf; + iov[0].iov_len = sizeof(buf); + msg.msg_iov = iov; + msg.msg_name = NULL; + msg.msg_namelen = 0; + msg.msg_control = cmptr; + msg.msg_controllen = CONTROLLLEN; + if (!recv_unix_data(sockd, &msg)) { + LOGERR("Failed to recv_unix_data in get_fd from %s %s:%d", file, func, line); + goto out; + } +out: + cm = (int *)CMSG_DATA(cmptr); + newfd = *cm; + free(cmptr); + return newfd; +} + + +void _json_check(json_t *val, json_error_t *err, const char *file, const char *func, const int line) +{ + if (likely(val)) + return; + + LOGERR("Invalid json line:%d col:%d pos:%d text: %s from %s %s:%d", + err->line, err->column, err->position, err->text, + file, func, line); +} + +/* Extracts a string value from a json array with error checking. To be used + * when the value of the string returned is only examined and not to be stored. 
+ * See json_array_string below */ +const char *__json_array_string(json_t *val, unsigned int entry) +{ + json_t *arr_entry; + + if (json_is_null(val)) + return NULL; + if (!json_is_array(val)) + return NULL; + if (entry > json_array_size(val)) + return NULL; + arr_entry = json_array_get(val, entry); + if (!json_is_string(arr_entry)) + return NULL; + + return json_string_value(arr_entry); +} + +/* Creates a freshly malloced dup of __json_array_string */ +char *json_array_string(json_t *val, unsigned int entry) +{ + const char *buf = __json_array_string(val, entry); + + if (buf) + return strdup(buf); + return NULL; +} + +json_t *json_object_dup(json_t *val, const char *entry) +{ + return json_copy(json_object_get(val, entry)); +} + +char *rotating_filename(const char *path, time_t when) +{ + char *filename; + struct tm tm; + + gmtime_r(&when, &tm); + ASPRINTF(&filename, "%s%04d%02d%02d%02d.log", path, tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour); + return filename; +} + +/* Creates a logfile entry which changes filename hourly with exclusive access */ +bool rotating_log(const char *path, const char *msg) +{ + mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; + char *filename; + FILE *fp; + int fd; + bool ok = false; + + filename = rotating_filename(path, time(NULL)); + fd = open(filename, O_CREAT | O_RDWR | O_CLOEXEC , mode); + if (unlikely(fd == -1)) { + LOGERR("Failed to open %s in rotating_log!", filename); + goto stageleft; + } + fp = fdopen(fd, "ae"); + if (unlikely(!fp)) { + Close(fd); + LOGERR("Failed to fdopen %s in rotating_log!", filename); + goto stageleft; + } + if (unlikely(flock(fd, LOCK_EX))) { + fclose(fp); + LOGERR("Failed to flock %s in rotating_log!", filename); + goto stageleft; + } + fprintf(fp, "%s\n", msg); + fclose(fp); + ok = true; + +stageleft: + free(filename); + + return ok; +} + +/* Align a size_t to 4 byte boundaries for fussy arches */ +void align_len(size_t *len) +{ + if (*len % 4) + *len += 4 - (*len % 4); +} + +/* 
Malloc failure should be fatal but keep backing off and retrying as the OS + * will kill us eventually if it can't recover. */ +void realloc_strcat(char **ptr, const char *s) +{ + size_t old, new, len; + int backoff = 1; + void *new_ptr; + char *ofs; + + if (unlikely(!*s)) { + LOGWARNING("Passed empty pointer to realloc_strcat"); + return; + } + new = strlen(s); + if (unlikely(!new)) { + LOGWARNING("Passed empty string to realloc_strcat"); + return; + } + if (!*ptr) + old = 0; + else + old = strlen(*ptr); + len = old + new + 1; + len = round_up_page(len); + while (42) { + new_ptr = realloc(*ptr, len); + if (likely(new_ptr)) + break; + if (backoff == 1) + fprintf(stderr, "Failed to realloc %d, retrying\n", (int)len); + cksleep_ms(backoff); + backoff <<= 1; + } + *ptr = new_ptr; + ofs = *ptr + old; + sprintf(ofs, "%s", s); +} + +void trail_slash(char **buf) +{ + int ofs; + + ofs = strlen(*buf) - 1; + if (memcmp(*buf + ofs, "/", 1)) + realloc_strcat(buf, "/"); +} + +void *_ckalloc(size_t len, const char *file, const char *func, const int line) +{ + int backoff = 1; + void *ptr; + + align_len(&len); + while (42) { + ptr = malloc(len); + if (likely(ptr)) + break; + if (backoff == 1) { + fprintf(stderr, "Failed to ckalloc %d, retrying from %s %s:%d\n", + (int)len, file, func, line); + } + cksleep_ms(backoff); + backoff <<= 1; + } + return ptr; +} + +void *json_ckalloc(size_t size) +{ + return _ckalloc(size, __FILE__, __func__, __LINE__); +} + +void *_ckzalloc(size_t len, const char *file, const char *func, const int line) +{ + int backoff = 1; + void *ptr; + + align_len(&len); + while (42) { + ptr = calloc(len, 1); + if (likely(ptr)) + break; + if (backoff == 1) { + fprintf(stderr, "Failed to ckzalloc %d, retrying from %s %s:%d\n", + (int)len, file, func, line); + } + cksleep_ms(backoff); + backoff <<= 1; + } + return ptr; +} + +/* Round up to the nearest page size for efficient malloc */ +size_t round_up_page(size_t len) +{ + int rem = len % PAGESIZE; + + if (rem) + len 
+= PAGESIZE - rem; + return len; +} + + + +/* Adequate size s==len*2 + 1 must be alloced to use this variant */ +void __bin2hex(void *vs, const void *vp, size_t len) +{ + static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; + const uchar *p = vp; + uchar *s = vs; + int i; + + for (i = 0; i < (int)len; i++) { + *s++ = hex[p[i] >> 4]; + *s++ = hex[p[i] & 0xF]; + } + *s++ = '\0'; +} + +/* Returns a malloced array string of a binary value of arbitrary length. The + * array is rounded up to a 4 byte size to appease architectures that need + * aligned array sizes */ +void *bin2hex(const void *vp, size_t len) +{ + const uchar *p = vp; + size_t slen; + uchar *s; + + slen = len * 2 + 1; + s = ckzalloc(slen); + __bin2hex(s, p, len); + + return s; +} + +const int hex2bin_tbl[256] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, +}; + +bool _validhex(const char *buf, const char *file, const char *func, const int line) +{ + unsigned int i, 
slen; + bool ret = false; + + slen = strlen(buf); + if (!slen || slen % 2) { + LOGDEBUG("Invalid hex due to length %u from %s %s:%d", slen, file, func, line); + goto out; + } + for (i = 0; i < slen; i++) { + uchar idx = buf[i]; + + if (hex2bin_tbl[idx] == -1) { + LOGDEBUG("Invalid hex due to value %u at offset %d from %s %s:%d", + idx, i, file, func, line); + goto out; + } + } + ret = true; +out: + return ret; +} + +/* Does the reverse of bin2hex but does not allocate any ram */ +bool _hex2bin(void *vp, const void *vhexstr, size_t len, const char *file, const char *func, const int line) +{ + const uchar *hexstr = vhexstr; + int nibble1, nibble2; + bool ret = false; + uchar *p = vp; + uchar idx; + + while (*hexstr && len) { + if (unlikely(!hexstr[1])) { + LOGWARNING("Early end of string in hex2bin from %s %s:%d", file, func, line); + return ret; + } + + idx = *hexstr++; + nibble1 = hex2bin_tbl[idx]; + idx = *hexstr++; + nibble2 = hex2bin_tbl[idx]; + + if (unlikely((nibble1 < 0) || (nibble2 < 0))) { + LOGWARNING("Invalid binary encoding in hex2bin from %s %s:%d", file, func, line); + return ret; + } + + *p++ = (((uchar)nibble1) << 4) | ((uchar)nibble2); + --len; + } + + if (likely(len == 0 && *hexstr == 0)) + ret = true; + if (!ret) + LOGWARNING("Failed hex2bin decode from %s %s:%d", file, func, line); + return ret; +} + +static const int b58tobin_tbl[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, + -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, + -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, + 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 +}; + +/* b58bin should always be at least 25 bytes long and already checked to be + * valid. 
*/
+void b58tobin(char *b58bin, const char *b58)
+{
+	uint32_t c, bin32[7], be;
+	int len, i, j;
+	uint64_t t;
+
+	memset(bin32, 0, 7 * sizeof(uint32_t));
+	len = strlen((const char *)b58);
+	for (i = 0; i < len; i++) {
+		c = b58[i];
+		c = b58tobin_tbl[c];
+		for (j = 6; j >= 0; j--) {
+			t = ((uint64_t)bin32[j]) * 58 + c;
+			c = (t & 0x3f00000000ull) >> 32;
+			bin32[j] = t & 0xffffffffull;
+		}
+	}
+	*(b58bin++) = bin32[0] & 0xff;
+	for (i = 1; i < 7; i++) {
+		/* memcpy avoids the old misaligned store through a cast
+		 * uint32_t pointer into the caller's char buffer, which is
+		 * undefined behaviour on alignment-strict architectures */
+		be = htobe32(bin32[i]);
+		memcpy(b58bin, &be, sizeof(be));
+		b58bin += sizeof(uint32_t);
+	}
+}
+
+/* Does a safe string comparison tolerating zero length and NULL strings */
+int safecmp(const char *a, const char *b)
+{
+	int lena, lenb;
+
+	if (unlikely(!a || !b)) {
+		if (a != b)
+			return -1;
+		return 0;
+	}
+	lena = strlen(a);
+	lenb = strlen(b);
+	if (unlikely(!lena || !lenb)) {
+		if (lena != lenb)
+			return -1;
+		return 0;
+	}
+	return (strcmp(a, b));
+}
+
+/* Returns whether there is a case insensitive match of buf to cmd, safely
+ * handling NULL or zero length strings.
*/ +bool cmdmatch(const char *buf, const char *cmd) +{ + int cmdlen, buflen; + + if (!buf) + return false; + buflen = strlen(buf); + if (!buflen) + return false; + cmdlen = strlen(cmd); + if (buflen < cmdlen) + return false; + return !strncasecmp(buf, cmd, cmdlen); +} + + +static const char base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; + +/* Return a malloced string of *src encoded into mime base 64 */ +char *http_base64(const char *src) +{ + char *str, *dst; + size_t l, hlen; + int t, r; + + l = strlen((const char *)src); + hlen = ((l + 2) / 3) * 4 + 1; + str = ckalloc(hlen); + dst = str; + r = 0; + + while (l >= 3) { + t = (src[0] << 16) | (src[1] << 8) | src[2]; + dst[0] = base64[(t >> 18) & 0x3f]; + dst[1] = base64[(t >> 12) & 0x3f]; + dst[2] = base64[(t >> 6) & 0x3f]; + dst[3] = base64[(t >> 0) & 0x3f]; + src += 3; l -= 3; + dst += 4; r += 4; + } + + switch (l) { + case 2: + t = (src[0] << 16) | (src[1] << 8); + dst[0] = base64[(t >> 18) & 0x3f]; + dst[1] = base64[(t >> 12) & 0x3f]; + dst[2] = base64[(t >> 6) & 0x3f]; + dst[3] = '='; + dst += 4; + r += 4; + break; + case 1: + t = src[0] << 16; + dst[0] = base64[(t >> 18) & 0x3f]; + dst[1] = base64[(t >> 12) & 0x3f]; + dst[2] = dst[3] = '='; + dst += 4; + r += 4; + break; + case 0: + break; + } + *dst = 0; + return (str); +} + +static const int8_t charset_rev[128] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + 15, -1, 10, 17, 21, 20, 26, 30, 7, 5, -1, -1, -1, -1, -1, -1, + -1, 29, -1, 24, 13, 25, 9, 8, 23, -1, 18, 22, 31, 27, 19, -1, + 1, 0, 3, 16, 11, 28, 12, 14, 6, 4, 2, -1, -1, -1, -1, -1, + -1, 29, -1, 24, 13, 25, 9, 8, 23, -1, 18, 22, 31, 27, 19, -1, + 1, 0, 3, 16, 11, 28, 12, 14, 6, 4, 2, -1, -1, -1, -1, -1 +}; + +/* It's assumed that there is no chance of sending invalid chars to these + * functions as they should 
 have been checked beforehand. */
/* Decode a bech32 string into 5 bit data values, setting *data_len to the
 * number of data values excluding the 6 character checksum. No checksum
 * verification is performed - NOTE(review): assumed validated upstream. */
static void bech32_decode(uint8_t *data, int *data_len, const char *input)
{
	int input_len = strlen(input), hrp_len, i;

	*data_len = 0;
	/* Scan back from the end for the '1' separator; everything after it
	 * is data, everything before is the human readable part. */
	while (*data_len < input_len && input[(input_len - 1) - *data_len] != '1')
		++(*data_len);
	hrp_len = input_len - (1 + *data_len);
	/* Exclude the 6 character checksum from the reported length. */
	*(data_len) -= 6;
	for (i = hrp_len + 1; i < input_len; i++) {
		int v = (input[i] & 0x80) ? -1 : charset_rev[(int)input[i]];

		/* The final 6 checksum characters are not stored. */
		if (i + 6 < input_len)
			data[i - (1 + hrp_len)] = v;
	}
}

/* Repack a stream of 5 bit values into 8 bit bytes, most significant bits
 * first; any leftover bits short of a full byte are discarded. */
static void convert_bits(char *out, int *outlen, const uint8_t *in,
			 int inlen)
{
	const int outbits = 8, inbits = 5;
	uint32_t val = 0, maxv = (((uint32_t)1) << outbits) - 1;
	int bits = 0;

	while (inlen--) {
		val = (val << inbits) | *(in++);
		bits += inbits;
		while (bits >= outbits) {
			bits -= outbits;
			out[(*outlen)++] = (val >> bits) & maxv;
		}
	}
}

/* Build a 25 byte P2PKH output script (OP_DUP OP_HASH160 <20 byte hash>
 * OP_EQUALVERIFY OP_CHECKSIG) from a base58 address. */
static int address_to_pubkeytxn(char *pkh, const char *addr)
{
	char b58bin[25] = {};

	b58tobin(b58bin, addr);
	pkh[0] = 0x76;
	pkh[1] = 0xa9;
	pkh[2] = 0x14;
	/* Skip the version byte; take the 20 byte hash160. */
	memcpy(&pkh[3], &b58bin[1], 20);
	pkh[23] = 0x88;
	pkh[24] = 0xac;
	return 25;
}

/* Build a 23 byte P2SH output script (OP_HASH160 <20 byte hash> OP_EQUAL)
 * from a base58 address. */
static int address_to_scripttxn(char *psh, const char *addr)
{
	char b58bin[25] = {};

	b58tobin(b58bin, addr);
	psh[0] = 0xa9;
	psh[1] = 0x14;
	memcpy(&psh[2], &b58bin[1], 20);
	psh[22] = 0x87;
	return 23;
}

/* Build a segwit output script (<version op> <push len> <witness program>)
 * from a bech32 address. */
static int segaddress_to_txn(char *p2h, const char *addr)
{
	int data_len, witdata_len = 0;
	char *witdata = &p2h[2];
	uint8_t data[84];

	bech32_decode(data, &data_len, addr);
	p2h[0] = data[0];
	/* Witness version is > 0 */
	if (p2h[0])
		p2h[0] += 0x50;
	convert_bits(witdata, &witdata_len, data + 1, data_len - 1);
	p2h[1] = witdata_len;
	return witdata_len + 2;
}

/* Convert an address to a transaction and return the length of the transaction */
int address_to_txn(char *p2h, const char *addr, const bool script, const bool segwit)
{
	if (segwit)
		return segaddress_to_txn(p2h, addr);
	if (script)
		return
 address_to_scripttxn(p2h, addr);
	return address_to_pubkeytxn(p2h, addr);
}

/* For encoding nHeight into coinbase, return how many bytes were used */
int ser_number(uchar *s, int32_t val)
{
	int32_t *i32 = (int32_t *)&s[1];
	int len;

	if (val < 0x80)
		len = 1;
	else if (val < 0x8000)
		len = 2;
	else if (val < 0x800000)
		len = 3;
	else
		len = 4;
	/* NOTE(review): always stores 4 little endian bytes at s[1] even when
	 * len < 4, so the destination must have at least 5 bytes of room. */
	*i32 = htole32(val);
	/* Leading byte is the payload length; total written is len + 1. */
	s[0] = len++;
	return len;
}

/* Decode a number serialised by ser_number, returning 0 for a corrupt
 * length prefix. */
int get_sernumber(uchar *s)
{
	int32_t val = 0;
	int len;

	len = s[0];
	if (unlikely(len < 1 || len > 4))
		return 0;
	memcpy(&val, &s[1], len);
	return le32toh(val);
}

/* For testing a le encoded 256 byte hash against a target */
bool fulltest(const uchar *hash, const uchar *target)
{
	uint32_t *hash32 = (uint32_t *)hash;
	uint32_t *target32 = (uint32_t *)target;
	bool ret = true;
	int i;

	/* Compare 32 bit words from most significant down, stopping at the
	 * first word that differs. Equal hash and target passes. */
	for (i = 28 / 4; i >= 0; i--) {
		uint32_t h32tmp = le32toh(hash32[i]);
		uint32_t t32tmp = le32toh(target32[i]);

		if (h32tmp > t32tmp) {
			ret = false;
			break;
		}
		if (h32tmp < t32tmp) {
			ret = true;
			break;
		}
	}
	return ret;
}

void copy_tv(tv_t *dest, const tv_t *src)
{
	memcpy(dest, src, sizeof(tv_t));
}

/* timespec to timeval, truncating nanoseconds to microseconds. */
void ts_to_tv(tv_t *val, const ts_t *spec)
{
	val->tv_sec = spec->tv_sec;
	val->tv_usec = spec->tv_nsec / 1000;
}

/* timeval to timespec. */
void tv_to_ts(ts_t *spec, const tv_t *val)
{
	spec->tv_sec = val->tv_sec;
	spec->tv_nsec = val->tv_usec * 1000;
}

/* Microseconds to timeval. */
void us_to_tv(tv_t *val, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem;
}

/* Microseconds to timespec. */
void us_to_ts(ts_t *spec, int64_t us)
{
	lldiv_t tvdiv = lldiv(us, 1000000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000;
}

/* Milliseconds to timespec. */
void ms_to_ts(ts_t *spec, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	spec->tv_sec = tvdiv.quot;
	spec->tv_nsec = tvdiv.rem * 1000000;
}

/* Milliseconds to timeval. */
void ms_to_tv(tv_t *val, int64_t ms)
{
	lldiv_t tvdiv = lldiv(ms, 1000);

	val->tv_sec = tvdiv.quot;
	val->tv_usec = tvdiv.rem * 1000;
}

/* Current wall clock time as a timeval. */
void tv_time(tv_t *tv)
{
	gettimeofday(tv, NULL);
}

/* Current wall clock time as a timespec. */
void ts_realtime(ts_t *ts)
{
	clock_gettime(CLOCK_REALTIME, ts);
}

/* Record the monotonic start time for a subsequent cksleep_*_r call. */
void cksleep_prepare_r(ts_t *ts)
{
	clock_gettime(CLOCK_MONOTONIC, ts);
}

/* Sleep until the absolute monotonic time ts_end. */
void nanosleep_abstime(ts_t *ts_end)
{
	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL);
}

/* a += b with nanosecond carry. Assumes both operands are normalised. */
void timeraddspec(ts_t *a, const ts_t *b)
{
	a->tv_sec += b->tv_sec;
	a->tv_nsec += b->tv_nsec;
	if (a->tv_nsec >= 1000000000) {
		a->tv_nsec -= 1000000000;
		a->tv_sec++;
	}
}

/* Reentrant version of cksleep functions allow start time to be set separately
 * from the beginning of the actual sleep, allowing scheduling delays to be
 * counted in the sleep. */
void cksleep_ms_r(ts_t *ts_start, int ms)
{
	ts_t ts_end;

	ms_to_ts(&ts_end, ms);
	timeraddspec(&ts_end, ts_start);
	nanosleep_abstime(&ts_end);
}

void cksleep_us_r(ts_t *ts_start, int64_t us)
{
	ts_t ts_end;

	us_to_ts(&ts_end, us);
	timeraddspec(&ts_end, ts_start);
	nanosleep_abstime(&ts_end);
}

void cksleep_ms(int ms)
{
	ts_t ts_start;

	cksleep_prepare_r(&ts_start);
	cksleep_ms_r(&ts_start, ms);
}

void cksleep_us(int64_t us)
{
	ts_t ts_start;

	cksleep_prepare_r(&ts_start);
	cksleep_us_r(&ts_start, us);
}

/* Returns the microseconds difference between end and start times as a double */
double us_tvdiff(tv_t *end, tv_t *start)
{
	/* Sanity check. We should only be using this for small differences so
	 * limit the max to 60 seconds. */
	if (unlikely(end->tv_sec - start->tv_sec > 60))
		return 60000000;
	return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec);
}

/* Returns the milliseconds difference between end and start times */
int ms_tvdiff(tv_t *end, tv_t *start)
{
	/* Like us_tdiff, limit to 1 hour.
 */
	if (unlikely(end->tv_sec - start->tv_sec > 3600))
		return 3600000;
	return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000;
}

/* Returns the seconds difference between end and start times as a double */
double tvdiff(tv_t *end, tv_t *start)
{
	return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0;
}

/* Create an exponentially decaying average over interval */
void decay_time(double *f, double fadd, double fsecs, double interval)
{
	double ftotal, fprop, dexp;

	/* A non-positive elapsed time cannot contribute. */
	if (fsecs <= 0)
		return;
	dexp = fsecs / interval;
	/* Put Sanity bound on how large the denominator can get */
	if (unlikely(dexp > 36))
		dexp = 36;
	fprop = 1.0 - 1 / exp(dexp);
	ftotal = 1.0 + fprop;
	/* Blend the new rate (fadd/fsecs) into the running average weighted
	 * by how much of the interval has elapsed. */
	*f += (fadd / fsecs * fprop);
	*f /= ftotal;
	/* Sanity check to prevent meaningless super small numbers that
	 * eventually underflow libjansson's real number interpretation. */
	if (unlikely(*f < 2E-16))
		*f = 0;
}

/* Sanity check to prevent clock adjustments backwards from screwing up stats */
double sane_tdiff(tv_t *end, tv_t *start)
{
	double tdiff = tvdiff(end, start);

	if (unlikely(tdiff < 0.001))
		tdiff = 0.001;
	return tdiff;
}

/* Convert a double value into a truncated string for displaying with its
 * associated suitable for Mega, Giga etc.
 Buf array needs to be long enough */
void suffix_string(double val, char *buf, size_t bufsiz, int sigdigits)
{
	const double kilo = 1000;
	const double mega = 1000000;
	const double giga = 1000000000;
	const double tera = 1000000000000;
	const double peta = 1000000000000000;
	const double exa  = 1000000000000000000;
	char suffix[2] = "";
	bool decimal = true;
	double dval;

	/* Scale down to the appropriate SI magnitude. Each branch divides by
	 * (magnitude / kilo) first so that dval = val / kilo lands in range. */
	if (val >= exa) {
		val /= peta;
		dval = val / kilo;
		strcpy(suffix, "E");
	} else if (val >= peta) {
		val /= tera;
		dval = val / kilo;
		strcpy(suffix, "P");
	} else if (val >= tera) {
		val /= giga;
		dval = val / kilo;
		strcpy(suffix, "T");
	} else if (val >= giga) {
		val /= mega;
		dval = val / kilo;
		strcpy(suffix, "G");
	} else if (val >= mega) {
		val /= kilo;
		dval = val / kilo;
		strcpy(suffix, "M");
	} else if (val >= kilo) {
		dval = val / kilo;
		strcpy(suffix, "K");
	} else {
		dval = val;
		decimal = false;
	}

	if (!sigdigits) {
		if (decimal)
			snprintf(buf, bufsiz, "%.3g%s", dval, suffix);
		else
			snprintf(buf, bufsiz, "%d%s", (unsigned int)dval, suffix);
	} else {
		/* Always show sigdigits + 1, padded on right with zeroes
		 * followed by suffix */
		int ndigits = sigdigits - 1 - (dval > 0.0 ? floor(log10(dval)) : 0);

		snprintf(buf, bufsiz, "%*.*f%s", sigdigits + 1, ndigits, dval, suffix);
	}
}

/* truediffone == 0x00000000FFFF0000000000000000000000000000000000000000000000000000
 * Generate a 256 bit binary LE target by cutting up diff into 64 bit sized
 * portions or vice versa.
 */
static const double truediffone = 26959535291011309493156476344723991336010898738574164086137773096960.0;
/* Powers of two used to weight each 64 bit quarter of a 256 bit value. */
static const double bits192 = 6277101735386680763835789423207666416102355444464034512896.0;
static const double bits128 = 340282366920938463463374607431768211456.0;
static const double bits64 = 18446744073709551616.0;

/* Converts a little endian 256 bit value to a double */
double le256todouble(const uchar *target)
{
	uint64_t *data64;
	double dcut64;

	/* Sum the four 64 bit words weighted by their position, most
	 * significant word (offset 24) first. */
	data64 = (uint64_t *)(target + 24);
	dcut64 = le64toh(*data64) * bits192;

	data64 = (uint64_t *)(target + 16);
	dcut64 += le64toh(*data64) * bits128;

	data64 = (uint64_t *)(target + 8);
	dcut64 += le64toh(*data64) * bits64;

	data64 = (uint64_t *)(target);
	dcut64 += le64toh(*data64);

	return dcut64;
}

/* Converts a big endian 256 bit value to a double */
double be256todouble(const uchar *target)
{
	uint64_t *data64;
	double dcut64;

	/* Same as le256todouble but the most significant word is first in
	 * memory. */
	data64 = (uint64_t *)(target);
	dcut64 = be64toh(*data64) * bits192;

	data64 = (uint64_t *)(target + 8);
	dcut64 += be64toh(*data64) * bits128;

	data64 = (uint64_t *)(target + 16);
	dcut64 += be64toh(*data64) * bits64;

	data64 = (uint64_t *)(target + 24);
	dcut64 += be64toh(*data64);

	return dcut64;
}

/* Return a difficulty from a binary target */
double diff_from_target(uchar *target)
{
	double dcut64;

	dcut64 = le256todouble(target);
	/* Guard against division by zero on an all zero target. */
	if (unlikely(dcut64 <= 0))
		dcut64 = 1;
	return truediffone / dcut64;
}

/* Return a difficulty from a binary big endian target */
double diff_from_betarget(uchar *target)
{
	double dcut64;

	dcut64 = be256todouble(target);
	if (unlikely(dcut64 <= 0))
		dcut64 = 1;
	return truediffone / dcut64;
}

/* Return the network difficulty from the block header which is in packed form,
 * as a double.
 */
double diff_from_nbits(char *nbits)
{
	/* First byte of the compact representation is the exponent. */
	uint8_t shift = nbits[0];
	uchar target[32] = {};
	char *nb;

	nb = bin2hex(nbits, 4);
	LOGDEBUG("Nbits is %s", nb);
	free(nb);
	/* Clamp a corrupt exponent so the memcpy below stays in bounds. */
	if (unlikely(shift < 3)) {
		LOGWARNING("Corrupt shift of %d in nbits", shift);
		shift = 3;
	} else if (unlikely(shift > 32)) {
		LOGWARNING("Corrupt shift of %d in nbits", shift);
		shift = 32;
	}
	/* Place the 3 byte mantissa at its shifted position in a big endian
	 * 256 bit target. */
	memcpy(target + (32 - shift), nbits + 1, 3);
	return diff_from_betarget(target);
}

/* Build a 256 bit little endian binary target from a difficulty by repeated
 * division: peel off each 64 bit word from most significant down. */
void target_from_diff(uchar *target, double diff)
{
	uint64_t *data64, h64;
	double d64, dcut64;

	if (unlikely(diff == 0.0)) {
		/* This shouldn't happen but best we check to prevent a crash */
		memset(target, 0xff, 32);
		return;
	}

	d64 = truediffone;
	d64 /= diff;

	dcut64 = d64 / bits192;
	h64 = dcut64;
	data64 = (uint64_t *)(target + 24);
	*data64 = htole64(h64);
	dcut64 = h64;
	dcut64 *= bits192;
	d64 -= dcut64;

	dcut64 = d64 / bits128;
	h64 = dcut64;
	data64 = (uint64_t *)(target + 16);
	*data64 = htole64(h64);
	dcut64 = h64;
	dcut64 *= bits128;
	d64 -= dcut64;

	dcut64 = d64 / bits64;
	h64 = dcut64;
	data64 = (uint64_t *)(target + 8);
	*data64 = htole64(h64);
	dcut64 = h64;
	dcut64 *= bits64;
	d64 -= dcut64;

	h64 = d64;
	data64 = (uint64_t *)(target);
	*data64 = htole64(h64);
}

/* Double SHA256 of len bytes of data into hash. */
void gen_hash(uchar *data, uchar *hash, int len)
{
	uchar hash1[32];

	sha256(data, len, hash1);
	sha256(hash1, 32, hash);
}
diff --git a/solo-ckpool-source/src/libckpool.h b/solo-ckpool-source/src/libckpool.h
new file mode 100644
index 0000000..78a83fe
--- /dev/null
+++ b/solo-ckpool-source/src/libckpool.h
@@ -0,0 +1,616 @@
/*
 * Copyright 2014-2018,2023 Con Kolivas
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 3 of the License, or (at your option)
 * any later version. See COPYING for more details.
+ */ + +/* This file should contain all exported functions of libckpool */ + +#ifndef LIBCKPOOL_H +#define LIBCKPOOL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#if HAVE_BYTESWAP_H +# include +#endif + +#if HAVE_ENDIAN_H +# include +#elif HAVE_SYS_ENDIAN_H +# include +#endif + +#include +#include + +#include "utlist.h" + +#ifndef bswap_16 + #define bswap_16 __builtin_bswap16 + #define bswap_32 __builtin_bswap32 + #define bswap_64 __builtin_bswap64 +#endif + +/* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then + * htobe64 also won't exist */ +#ifndef htobe32 +# if __BYTE_ORDER == __LITTLE_ENDIAN +# define htole16(x) (x) +# define le16toh(x) (x) +# define htole32(x) (x) +# define htole64(x) (x) +# define le32toh(x) (x) +# define le64toh(x) (x) +# define be32toh(x) bswap_32(x) +# define be64toh(x) bswap_64(x) +# define htobe16(x) bswap_16(x) +# define htobe32(x) bswap_32(x) +# define htobe64(x) bswap_64(x) +# elif __BYTE_ORDER == __BIG_ENDIAN +# define htole16(x) bswap_16(x) +# define le16toh(x) bswap_16(x) +# define htole32(x) bswap_32(x) +# define le32toh(x) bswap_32(x) +# define le64toh(x) bswap_64(x) +# define htole64(x) bswap_64(x) +# define be32toh(x) (x) +# define be64toh(x) (x) +# define htobe16(x) (x) +# define htobe32(x) (x) +# define htobe64(x) (x) +# endif +#endif + +#define unlikely(expr) (__builtin_expect(!!(expr), 0)) +#define likely(expr) (__builtin_expect(!!(expr), 1)) +#define __maybe_unused __attribute__((unused)) +#define uninitialised_var(x) x = x + +#ifndef MAX +#define MAX(a,b) \ + ({ __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a > _b ? _a : _b; }) +#endif +#ifndef MIN +#define MIN(a,b) \ + ({ __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a < _b ? 
_a : _b; })
#endif

typedef unsigned char uchar;

typedef struct timeval tv_t;
typedef struct timespec ts_t;

/* Reverse the order of the eight 32 bit words of a 256 bit value without
 * byte swapping within each word. */
static inline void swap_256(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;

	dest[0] = src[7];
	dest[1] = src[6];
	dest[2] = src[5];
	dest[3] = src[4];
	dest[4] = src[3];
	dest[5] = src[2];
	dest[6] = src[1];
	dest[7] = src[0];
}

/* Reverse word order AND byte swap each word: a full 32 byte endian flip. */
static inline void bswap_256(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;

	dest[0] = bswap_32(src[7]);
	dest[1] = bswap_32(src[6]);
	dest[2] = bswap_32(src[5]);
	dest[3] = bswap_32(src[4]);
	dest[4] = bswap_32(src[3]);
	dest[5] = bswap_32(src[2]);
	dest[6] = bswap_32(src[1]);
	dest[7] = bswap_32(src[0]);
}

/* Byte swap each of the eight 32 bit words in place-order (256 bits). */
static inline void flip_32(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 8; i++)
		dest[i] = bswap_32(src[i]);
}

/* Byte swap each of the twenty 32 bit words (an 80 byte block header). */
static inline void flip_80(void *dest_p, const void *src_p)
{
	uint32_t *dest = dest_p;
	const uint32_t *src = src_p;
	int i;

	for (i = 0; i < 20; i++)
		dest[i] = bswap_32(src[i]);
}

/* Locking wrappers that record the call site for debugging; implementations
 * live in libckpool.c. */
#define cond_wait(_cond, _lock) _cond_wait(_cond, _lock, __FILE__, __func__, __LINE__)
#define cond_timedwait(_cond, _lock, _abstime) _cond_timedwait(_cond, _lock, _abstime, __FILE__, __func__, __LINE__)
#define mutex_timedlock(_lock, _timeout) _mutex_timedlock(_lock, _timeout, __FILE__, __func__, __LINE__)
#define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__)
#define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__)
#define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__)
#define rd_lock(_lock) _rd_lock(_lock, \
	__FILE__, __func__, __LINE__)
#define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__)
#define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__)
#define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__)
#define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__)
#define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__)
#define cond_init(_cond) _cond_init(_cond, __FILE__, __func__, __LINE__)

/* cklock wrappers - see struct cklock below. */
#define cklock_init(_lock) _cklock_init(_lock, __FILE__, __func__, __LINE__)
#define ck_rlock(_lock) _ck_rlock(_lock, __FILE__, __func__, __LINE__)
#define ck_wlock(_lock) _ck_wlock(_lock, __FILE__, __func__, __LINE__)
#define ck_dwlock(_lock) _ck_dwlock(_lock, __FILE__, __func__, __LINE__)
#define ck_dlock(_lock) _ck_dlock(_lock, __FILE__, __func__, __LINE__)
#define ck_runlock(_lock) _ck_runlock(_lock, __FILE__, __func__, __LINE__)
#define ck_wunlock(_lock) _ck_wunlock(_lock, __FILE__, __func__, __LINE__)

/* Allocation wrappers that abort on failure; ckzalloc zero-initialises. */
#define ckalloc(len) _ckalloc(len, __FILE__, __func__, __LINE__)
#define ckzalloc(len) _ckzalloc(len, __FILE__, __func__, __LINE__)

/* free() and NULL the pointer to defend against use after free. */
#define dealloc(ptr) do { \
	free(ptr); \
	ptr = NULL; \
} while (0)

#define VASPRINTF(strp, fmt, ...) do { \
	if (unlikely(vasprintf(strp, fmt, ##__VA_ARGS__) < 0)) \
		quitfrom(1, __FILE__, __func__, __LINE__, "Failed to vasprintf"); \
} while (0)

#define ASPRINTF(strp, fmt, ...) do { \
	if (unlikely(asprintf(strp, fmt, ##__VA_ARGS__) < 0)) \
		quitfrom(1, __FILE__, __func__, __LINE__, "Failed to asprintf"); \
} while (0)

void logmsg(int loglevel, const char *fmt, ...);

#define DEFLOGBUFSIZ 512

/* Format a message and emit it through logmsg in chunks bounded by __siz. */
#define LOGMSGSIZ(__siz, __lvl, __fmt, ...) \
do { \ + char *BUF; \ + int LEN, OFFSET = 0; \ + ASPRINTF(&BUF, __fmt, ##__VA_ARGS__); \ + LEN = strlen(BUF); \ + while (LEN > 0) { \ + char tmp42[__siz] = {}; \ + int CPY = MIN(LEN, DEFLOGBUFSIZ - 2); \ + memcpy(tmp42, BUF + OFFSET, CPY); \ + logmsg(__lvl, "%s", tmp42);\ + OFFSET += CPY; \ + LEN -= OFFSET; \ + } \ + free(BUF); \ +} while(0) + +#define LOGMSG(_lvl, _fmt, ...) \ + LOGMSGSIZ(DEFLOGBUFSIZ, _lvl, _fmt, ##__VA_ARGS__) + +#define LOGEMERG(fmt, ...) LOGMSG(LOG_EMERG, fmt, ##__VA_ARGS__) +#define LOGALERT(fmt, ...) LOGMSG(LOG_ALERT, fmt, ##__VA_ARGS__) +#define LOGCRIT(fmt, ...) LOGMSG(LOG_CRIT, fmt, ##__VA_ARGS__) +#define LOGERR(fmt, ...) LOGMSG(LOG_ERR, fmt, ##__VA_ARGS__) +#define LOGWARNING(fmt, ...) LOGMSG(LOG_WARNING, fmt, ##__VA_ARGS__) +#define LOGNOTICE(fmt, ...) LOGMSG(LOG_NOTICE, fmt, ##__VA_ARGS__) +#define LOGINFO(fmt, ...) LOGMSG(LOG_INFO, fmt, ##__VA_ARGS__) +#define LOGDEBUG(fmt, ...) LOGMSG(LOG_DEBUG, fmt, ##__VA_ARGS__) + +#define IN_FMT_FFL " in %s %s():%d" +#define quitfrom(status, _file, _func, _line, fmt, ...) do { \ + if (fmt) { \ + fprintf(stderr, fmt IN_FMT_FFL, ##__VA_ARGS__, _file, _func, _line); \ + fprintf(stderr, "\n"); \ + fflush(stderr); \ + } \ + exit(status); \ +} while (0) + +#define quit(status, fmt, ...) do { \ + if (fmt) { \ + fprintf(stderr, fmt, ##__VA_ARGS__); \ + fprintf(stderr, "\n"); \ + fflush(stderr); \ + } \ + exit(status); \ +} while (0) + +#define PAGESIZE (4096) + +/* Default timeouts for unix socket reads and writes in seconds. Set write + * timeout to double the read timeout in case of one read blocking the next + * writer. 
 */
#define UNIX_READ_TIMEOUT 5
#define UNIX_WRITE_TIMEOUT 10

/* Common durations in seconds. */
#define MIN1 60
#define MIN5 300
#define MIN15 900
#define HOUR 3600
#define HOUR6 21600
#define DAY 86400
#define WEEK 604800

/* Share error values */

/* Negative values are protocol/structure errors; positive values are share
 * rejection reasons. Order must match share_errs below. */
enum share_err {
	SE_INVALID_NONCE2 = -9,
	SE_WORKER_MISMATCH,
	SE_NO_NONCE,
	SE_NO_NTIME,
	SE_NO_NONCE2,
	SE_NO_JOBID,
	SE_NO_USERNAME,
	SE_INVALID_SIZE,
	SE_NOT_ARRAY,
	SE_NONE, // 0
	SE_INVALID_JOBID,
	SE_STALE,
	SE_NTIME_INVALID,
	SE_DUPE,
	SE_HIGH_DIFF,
	SE_INVALID_VERSION_MASK
};

static const char __maybe_unused *share_errs[] = {
	"Invalid nonce2 length",
	"Worker mismatch",
	"No nonce",
	"No ntime",
	"No nonce2",
	"No job_id",
	"No username",
	"Invalid array size",
	"Params not array",
	"Valid",
	"Invalid JobID",
	"Stale",
	"Ntime out of range",
	"Duplicate",
	"Above target",
	"Invalid version mask"
};

/* Map an enum share_err (starting at -9) onto its message string. */
#define SHARE_ERR(x) share_errs[((x) + 9)]

typedef struct ckmutex mutex_t;

/* Mutex annotated with the location of its last lock for debugging. */
struct ckmutex {
	pthread_mutex_t mutex;
	const char *file;
	const char *func;
	int line;
};

typedef struct ckrwlock rwlock_t;

/* rwlock annotated with the location of its last lock for debugging. */
struct ckrwlock {
	pthread_rwlock_t rwlock;
	const char *file;
	const char *func;
	int line;
};

/* ck locks, a write biased variant of rwlocks */
struct cklock {
	mutex_t mutex;
	rwlock_t rwlock;
	const char *file;
	const char *func;
	int line;
};

typedef struct cklock cklock_t;

/* A unix domain socket descriptor paired with its filesystem path. */
struct unixsock {
	int sockd;
	char *path;
};

typedef struct unixsock unixsock_t;

void _json_check(json_t *val, json_error_t *err, const char *file, const char *func, const int line);
#define json_check(VAL, ERR) _json_check(VAL, ERR, __FILE__, __func__, __LINE__)

/* Check and pack json */
#define JSON_CPACK(VAL, ...) do { \
	json_error_t ERR; \
	VAL = json_pack_ex(&ERR, 0, ##__VA_ARGS__); \
	json_check(VAL, &ERR); \
} while (0)

/* No error checking with these, make sure we know they're valid already!
 */
/* Copy the string value at key into buf; empty string if key is missing or
 * not a string. buf must be large enough - no bounds checking. */
static inline void json_strcpy(char *buf, json_t *val, const char *key)
{
	strcpy(buf, json_string_value(json_object_get(val, key)) ? : "");
}

static inline void json_dblcpy(double *dbl, json_t *val, const char *key)
{
	*dbl = json_real_value(json_object_get(val, key));
}

static inline void json_uintcpy(uint32_t *u32, json_t *val, const char *key)
{
	*u32 = (uint32_t)json_integer_value(json_object_get(val, key));
}

static inline void json_uint64cpy(uint64_t *u64, json_t *val, const char *key)
{
	*u64 = (uint64_t)json_integer_value(json_object_get(val, key));
}

static inline void json_int64cpy(int64_t *i64, json_t *val, const char *key)
{
	*i64 = (int64_t)json_integer_value(json_object_get(val, key));
}

static inline void json_intcpy(int *i, json_t *val, const char *key)
{
	*i = json_integer_value(json_object_get(val, key));
}

/* strdup the string value at key (empty string if absent); caller frees. */
static inline void json_strdup(char **buf, json_t *val, const char *key)
{
	*buf = strdup(json_string_value(json_object_get(val, key)) ? : "");
}

/* Helpers for setting a field will check for valid entry and print an error
 * if it is unsuccessfully set.
 */
static inline void _json_set_string(json_t *val, const char *key, const char *str,
				    const char *file, const char *func, const int line)
{
	/* NOTE(review): this is the only setter using the key-checking
	 * json_object_set_new rather than the _nocheck variant used by the
	 * others - possibly intentional; confirm before unifying. */
	if (unlikely(json_object_set_new(val, key, json_string(str))))
		LOGERR("Failed to set json string from %s %s:%d", file, func, line);
}
#define json_set_string(val, key, str) _json_set_string(val, key, str, __FILE__, __func__, __LINE__)

/* Int is long long so will work for u32 and int64 */
static inline void _json_set_int(json_t *val, const char *key, int64_t integer,
				 const char *file, const char *func, const int line)
{
	if (unlikely(json_object_set_new_nocheck(val, key, json_integer(integer))))
		LOGERR("Failed to set json int from %s %s:%d", file, func, line);
}
#define json_set_int(val, key, integer) _json_set_int(val, key, integer, __FILE__, __func__, __LINE__)
#define json_set_uint32(val, key, u32) _json_set_int(val, key, u32, __FILE__, __func__, __LINE__)
#define json_set_int64(val, key, i64) _json_set_int(val, key, i64, __FILE__, __func__, __LINE__)

static inline void _json_set_double(json_t *val, const char *key, double real,
				    const char *file, const char *func, const int line)
{
	if (unlikely(json_object_set_new_nocheck(val, key, json_real(real))))
		LOGERR("Failed to set json double from %s %s:%d", file, func, line);
}
#define json_set_double(val, key, real) _json_set_double(val, key, real, __FILE__, __func__, __LINE__)

static inline void _json_set_bool(json_t *val, const char *key, bool boolean,
				  const char *file, const char *func, const int line)
{
	if (unlikely(json_object_set_new_nocheck(val, key, json_boolean(boolean))))
		LOGERR("Failed to set json bool from %s %s:%d", file, func, line);
}
#define json_set_bool(val, key, boolean) _json_set_bool(val, key, boolean, __FILE__, __func__, __LINE__)


/* Steals the reference to object (json_object_set_new_nocheck semantics). */
static inline void _json_set_object(json_t *val, const char *key, json_t *object,
				    const char *file, const char *func, const int line)
{
	if
 (unlikely(json_object_set_new_nocheck(val, key, object)))
		LOGERR("Failed to set json object from %s %s:%d", file, func, line);
}
#define json_set_object(val, key, object) _json_set_object(val, key, object, __FILE__, __func__, __LINE__)

/* Process and thread helpers implemented in libckpool.c. */
void rename_proc(const char *name);
void create_pthread(pthread_t *thread, void *(*start_routine)(void *), void *arg);
void join_pthread(pthread_t thread);
bool ck_completion_timeout(void *fn, void *fnarg, int timeout);

/* Underscore-prefixed lock primitives; use the call-site-recording macros
 * defined above instead of calling these directly. */
int _cond_wait(pthread_cond_t *cond, mutex_t *lock, const char *file, const char *func, const int line);
int _cond_timedwait(pthread_cond_t *cond, mutex_t *lock, const struct timespec *abstime, const char *file, const char *func, const int line);
int _mutex_timedlock(mutex_t *lock, int timeout, const char *file, const char *func, const int line);
void _mutex_lock(mutex_t *lock, const char *file, const char *func, const int line);
void _mutex_unlock_noyield(mutex_t *lock, const char *file, const char *func, const int line);
void _mutex_unlock(mutex_t *lock, const char *file, const char *func, const int line);
int _mutex_trylock(mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line);
void mutex_destroy(mutex_t *lock);

void _wr_lock(rwlock_t *lock, const char *file, const char *func, const int line);
int _wr_trylock(rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line);
void _rd_lock(rwlock_t *lock, const char *file, const char *func, const int line);
void _rw_unlock(rwlock_t *lock, const char *file, const char *func, const int line);
void _rd_unlock_noyield(rwlock_t *lock, const char *file, const char *func, const int line);
void _wr_unlock_noyield(rwlock_t *lock, const char *file, const char *func, const int line);
void _rd_unlock(rwlock_t *lock, const char *file, const char *func, const int line);
void _wr_unlock(rwlock_t *lock, const char *file, const char *func, const int
line);
void _mutex_init(mutex_t *lock, const char *file, const char *func, const int line);
void _rwlock_init(rwlock_t *lock, const char *file, const char *func, const int line);
void _cond_init(pthread_cond_t *cond, const char *file, const char *func, const int line);

void _cklock_init(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_rlock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_ilock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_uilock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_ulock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_wlock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_dwlock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_dwilock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_dlock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_runlock(cklock_t *lock, const char *file, const char *func, const int line);
void _ck_wunlock(cklock_t *lock, const char *file, const char *func, const int line);
void cklock_destroy(cklock_t *lock);

/* Semaphore wrappers that record their call site. */
void _cksem_init(sem_t *sem, const char *file, const char *func, const int line);
void _cksem_post(sem_t *sem, const char *file, const char *func, const int line);
void _cksem_wait(sem_t *sem, const char *file, const char *func, const int line);
int _cksem_trywait(sem_t *sem, const char *file, const char *func, const int line);
int _cksem_mswait(sem_t *sem, int ms, const char *file, const char *func, const int line);
void _cksem_destroy(sem_t *sem, const char *file, const char *func, const int line);

#define cksem_init(SEM) _cksem_init(SEM, __FILE__, __func__, __LINE__)
#define cksem_post(SEM) _cksem_post(SEM, __FILE__, __func__, __LINE__)
#define cksem_wait(SEM) _cksem_wait(SEM, __FILE__, __func__, __LINE__)
#define \
cksem_trywait(SEM) _cksem_trywait(SEM, __FILE__, __func__, __LINE__)
#define cksem_mswait(SEM, _timeout) _cksem_mswait(SEM, _timeout, __FILE__, __func__, __LINE__)
#define cksem_destroy(SEM) _cksem_destroy(SEM, __FILE__, __func__, __LINE__)

/* errno classification helpers for non-blocking socket operations. */
static inline bool sock_connecting(void)
{
	return errno == EINPROGRESS;
}

static inline bool sock_blocks(void)
{
	return (errno == EAGAIN || errno == EWOULDBLOCK);
}

static inline bool sock_timeout(void)
{
	return (errno == ETIMEDOUT);
}

/* URL/socket address helpers implemented in libckpool.c. */
bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port);
bool url_from_sockaddr(const struct sockaddr *addr, char *url, char *port);
bool addrinfo_from_url(const char *url, const char *port, struct addrinfo *addrinfo);
bool url_from_serverurl(char *serverurl, char *newurl, char *newport);
bool url_from_socket(const int sockd, char *url, char *port);

void keep_sockalive(int fd);
void nolinger_socket(int fd);
void noblock_socket(int fd);
void block_socket(int fd);
/* Close a descriptor through a pointer, recording the call site. */
void _close(int *fd, const char *file, const char *func, const int line);
#define _Close(FD) _close(FD, __FILE__, __func__, __LINE__)
#define Close(FD) _close(&FD, __FILE__, __func__, __LINE__)
int bind_socket(char *url, char *port);
int connect_socket(char *url, char *port);
int round_trip(char *url);
int write_socket(int fd, const void *buf, size_t nbyte);
void empty_socket(int fd);
void _close_unix_socket(int *sockd, const char *server_path);
#define close_unix_socket(sockd, server_path) _close_unix_socket(&sockd, server_path)
int _open_unix_server(const char *server_path, const char *file, const char *func, const int line);
#define open_unix_server(server_path) _open_unix_server(server_path, __FILE__, __func__, __LINE__)
int _open_unix_client(const char *server_path, const char *file, const char *func, const int line);
#define open_unix_client(server_path) _open_unix_client(server_path, __FILE__, __func__, __LINE__)
int wait_close(int sockd, int timeout);
int
 wait_read_select(int sockd, float timeout);
int read_length(int sockd, void *buf, int len);
/* Unix socket message passing with separate initial/continuation timeouts. */
char *_recv_unix_msg(int sockd, int timeout1, int timeout2, const char *file, const char *func, const int line);
#define RECV_UNIX_TIMEOUT1 30
#define RECV_UNIX_TIMEOUT2 5
#define recv_unix_msg(sockd) _recv_unix_msg(sockd, UNIX_READ_TIMEOUT, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__)
#define recv_unix_msg_tmo(sockd, tmo) _recv_unix_msg(sockd, tmo, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__)
#define recv_unix_msg_tmo2(sockd, tmo1, tmo2) _recv_unix_msg(sockd, tmo1, tmo2, __FILE__, __func__, __LINE__)
int wait_write_select(int sockd, float timeout);
#define write_length(sockd, buf, len) _write_length(sockd, buf, len, __FILE__, __func__, __LINE__)
int _write_length(int sockd, const void *buf, int len, const char *file, const char *func, const int line);
bool _send_unix_msg(int sockd, const char *buf, int timeout, const char *file, const char *func, const int line);
#define send_unix_msg(sockd, buf) _send_unix_msg(sockd, buf, UNIX_WRITE_TIMEOUT, __FILE__, __func__, __LINE__)
bool _send_unix_data(int sockd, const struct msghdr *msg, const char *file, const char *func, const int line);
#define send_unix_data(sockd, msg) _send_unix_data(sockd, msg, __FILE__, __func__, __LINE__)
bool _recv_unix_data(int sockd, struct msghdr *msg, const char *file, const char *func, const int line);
#define recv_unix_data(sockd, msg) _recv_unix_data(sockd, msg, __FILE__, __func__, __LINE__)
/* File descriptor passing over unix sockets. */
bool _send_fd(int fd, int sockd, const char *file, const char *func, const int line);
#define send_fd(fd, sockd) _send_fd(fd, sockd, __FILE__, __func__, __LINE__)
int _get_fd(int sockd, const char *file, const char *func, const int line);
#define get_fd(sockd) _get_fd(sockd, __FILE__, __func__, __LINE__)

const char *__json_array_string(json_t *val, unsigned int entry);
char *json_array_string(json_t *val, unsigned int entry);
json_t *json_object_dup(json_t *val, const char
*entry); + +char *rotating_filename(const char *path, time_t when); +bool rotating_log(const char *path, const char *msg); + +void align_len(size_t *len); +void realloc_strcat(char **ptr, const char *s); +void trail_slash(char **buf); +void *_ckalloc(size_t len, const char *file, const char *func, const int line); +void *json_ckalloc(size_t size); +void *_ckzalloc(size_t len, const char *file, const char *func, const int line); +size_t round_up_page(size_t len); + +extern const int hex2bin_tbl[]; +void __bin2hex(void *vs, const void *vp, size_t len); +void *bin2hex(const void *vp, size_t len); +bool _validhex(const char *buf, const char *file, const char *func, const int line); +#define validhex(buf) _validhex(buf, __FILE__, __func__, __LINE__) +bool _hex2bin(void *p, const void *vhexstr, size_t len, const char *file, const char *func, const int line); +#define hex2bin(p, vhexstr, len) _hex2bin(p, vhexstr, len, __FILE__, __func__, __LINE__) +char *http_base64(const char *src); +void b58tobin(char *b58bin, const char *b58); +int safecmp(const char *a, const char *b); +bool cmdmatch(const char *buf, const char *cmd); + +int address_to_txn(char *p2h, const char *addr, const bool script, const bool segwit); +int ser_number(uchar *s, int32_t val); +int get_sernumber(uchar *s); +bool fulltest(const uchar *hash, const uchar *target); + +void copy_tv(tv_t *dest, const tv_t *src); +void ts_to_tv(tv_t *val, const ts_t *spec); +void tv_to_ts(ts_t *spec, const tv_t *val); +void us_to_tv(tv_t *val, int64_t us); +void us_to_ts(ts_t *spec, int64_t us); +void ms_to_ts(ts_t *spec, int64_t ms); +void ms_to_tv(tv_t *val, int64_t ms); +void tv_time(tv_t *tv); +void ts_realtime(ts_t *ts); + +void cksleep_prepare_r(ts_t *ts); +void nanosleep_abstime(ts_t *ts_end); +void timeraddspec(ts_t *a, const ts_t *b); +void cksleep_ms_r(ts_t *ts_start, int ms); +void cksleep_us_r(ts_t *ts_start, int64_t us); +void cksleep_ms(int ms); +void cksleep_us(int64_t us); + +double us_tvdiff(tv_t *end, 
tv_t *start); +int ms_tvdiff(tv_t *end, tv_t *start); +double tvdiff(tv_t *end, tv_t *start); + +void decay_time(double *f, double fadd, double fsecs, double interval); +double sane_tdiff(tv_t *end, tv_t *start); +void suffix_string(double val, char *buf, size_t bufsiz, int sigdigits); + +double le256todouble(const uchar *target); +double be256todouble(const uchar *target); +double diff_from_target(uchar *target); +double diff_from_betarget(uchar *target); +double diff_from_nbits(char *nbits); +void target_from_diff(uchar *target, double diff); + +void gen_hash(uchar *data, uchar *hash, int len); + +#endif /* LIBCKPOOL_H */ diff --git a/solo-ckpool-source/src/notifier.c b/solo-ckpool-source/src/notifier.c new file mode 100644 index 0000000..8157a4b --- /dev/null +++ b/solo-ckpool-source/src/notifier.c @@ -0,0 +1,63 @@ +/* + * Copyright 2014-2016 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. 
+ */ + +#include "config.h" + +#include +#include +#include + +#include "libckpool.h" + +int main(int argc, char **argv) +{ + char *name = NULL, *socket_dir = NULL; + bool proxy = false; + int c, sockd; + + while ((c = getopt(argc, argv, "n:s:p")) != -1) { + switch(c) { + case 'n': + name = strdup(optarg); + break; + case 's': + socket_dir = strdup(optarg); + break; + case 'p': + proxy = true; + break; + } + } + if (!socket_dir) + socket_dir = strdup("/tmp"); + trail_slash(&socket_dir); + if (!name) { + if (proxy) + name = strdup("ckproxy"); + else + name = strdup("ckpool"); + } + realloc_strcat(&socket_dir, name); + dealloc(name); + trail_slash(&socket_dir); + realloc_strcat(&socket_dir, "stratifier"); + sockd = open_unix_client(socket_dir); + if (sockd < 0) { + LOGERR("Failed to open socket: %s", socket_dir); + exit(1); + } + if (!send_unix_msg(sockd, "update")) { + LOGERR("Failed to send stratifier update msg"); + exit(1); + } + LOGNOTICE("Notified stratifier of block update"); + exit(0); +} + + diff --git a/solo-ckpool-source/src/sha2.c b/solo-ckpool-source/src/sha2.c new file mode 100644 index 0000000..f82356b --- /dev/null +++ b/solo-ckpool-source/src/sha2.c @@ -0,0 +1,236 @@ +/* + * FIPS 180-2 SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2013-2016, Con Kolivas + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. 
Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include "config.h" + +#include +#include + +#include "sha2.h" + +#define UNPACK32(x, str) \ +{ \ + *((str) + 3) = (uint8_t) ((x) ); \ + *((str) + 2) = (uint8_t) ((x) >> 8); \ + *((str) + 1) = (uint8_t) ((x) >> 16); \ + *((str) + 0) = (uint8_t) ((x) >> 24); \ +} + +#define PACK32(str, x) \ +{ \ + *(x) = ((uint32_t) *((str) + 3) ) \ + | ((uint32_t) *((str) + 2) << 8) \ + | ((uint32_t) *((str) + 1) << 16) \ + | ((uint32_t) *((str) + 0) << 24); \ +} + +#define SHA256_SCR(i) \ +{ \ + w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ + + SHA256_F3(w[i - 15]) + w[i - 16]; \ +} + +uint32_t sha256_h0[8] = + {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, + 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; + +uint32_t sha256_k[64] = + {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 
0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; + +/* SHA-256 functions */ + +#ifdef USE_AVX2 +extern void sha256_rorx(const void *, uint32_t[8], uint64_t); + +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + sha256_rorx(message, ctx->h, block_nb); +} +#elif defined(USE_AVX1) +extern void sha256_avx(const unsigned char *, uint32_t[8], uint64_t); + +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + sha256_avx(message, ctx->h, block_nb); +} +#elif defined(USE_SSE4) +extern void sha256_sse4(const unsigned char *, uint32_t[8], uint64_t); + +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + sha256_sse4(message, ctx->h, block_nb); +} +#else +void sha256_transf(sha256_ctx *ctx, const unsigned char *message, + unsigned int block_nb) +{ + uint32_t w[64]; + uint32_t wv[8]; + uint32_t t1, t2; + const unsigned char *sub_block; + int i; + + int j; + + for (i = 0; i < (int) block_nb; i++) { + sub_block = message + (i << 6); + + for (j = 0; j < 16; j++) { + PACK32(&sub_block[j << 2], &w[j]); + } + + for (j = 16; j < 64; j++) { + SHA256_SCR(j); + } + + for (j = 0; j < 8; j++) { + wv[j] = ctx->h[j]; + } + + for (j = 0; j < 64; j++) { + t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) + + sha256_k[j] + w[j]; + t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); + wv[7] = wv[6]; + wv[6] = wv[5]; + wv[5] = wv[4]; + wv[4] = wv[3] + t1; + wv[3] = wv[2]; + wv[2] = wv[1]; + wv[1] = wv[0]; + 
wv[0] = t1 + t2; + } + + for (j = 0; j < 8; j++) { + ctx->h[j] += wv[j]; + } + } +} +#endif +void sha256(const unsigned char *message, unsigned int len, unsigned char *digest) +{ + sha256_ctx ctx; + + sha256_init(&ctx); + sha256_update(&ctx, message, len); + sha256_final(&ctx, digest); +} + +void sha256_init(sha256_ctx *ctx) +{ + int i; + for (i = 0; i < 8; i++) { + ctx->h[i] = sha256_h0[i]; + } + + ctx->len = 0; + ctx->tot_len = 0; +} + +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len) +{ + unsigned int block_nb; + unsigned int new_len, rem_len, tmp_len; + const unsigned char *shifted_message; + + tmp_len = SHA256_BLOCK_SIZE - ctx->len; + rem_len = len < tmp_len ? len : tmp_len; + + memcpy(&ctx->block[ctx->len], message, rem_len); + + if (ctx->len + len < SHA256_BLOCK_SIZE) { + ctx->len += len; + return; + } + + new_len = len - rem_len; + block_nb = new_len / SHA256_BLOCK_SIZE; + + shifted_message = message + rem_len; + + sha256_transf(ctx, ctx->block, 1); + sha256_transf(ctx, shifted_message, block_nb); + + rem_len = new_len % SHA256_BLOCK_SIZE; + + memcpy(ctx->block, &shifted_message[block_nb << 6], + rem_len); + + ctx->len = rem_len; + ctx->tot_len += (block_nb + 1) << 6; +} + +void sha256_final(sha256_ctx *ctx, unsigned char *digest) +{ + unsigned int block_nb; + unsigned int pm_len; + unsigned int len_b; + + int i; + + block_nb = (1 + ((SHA256_BLOCK_SIZE - 9) + < (ctx->len % SHA256_BLOCK_SIZE))); + + len_b = (ctx->tot_len + ctx->len) << 3; + pm_len = block_nb << 6; + + memset(ctx->block + ctx->len, 0, pm_len - ctx->len); + ctx->block[ctx->len] = 0x80; + UNPACK32(len_b, ctx->block + pm_len - 4); + + sha256_transf(ctx, ctx->block, block_nb); + + for (i = 0 ; i < 8; i++) { + UNPACK32(ctx->h[i], &digest[i << 2]); + } +} diff --git a/solo-ckpool-source/src/sha2.h b/solo-ckpool-source/src/sha2.h new file mode 100644 index 0000000..d470112 --- /dev/null +++ b/solo-ckpool-source/src/sha2.h @@ -0,0 +1,69 @@ +/* + * FIPS 180-2 
SHA-224/256/384/512 implementation + * Last update: 02/02/2007 + * Issue date: 04/30/2005 + * + * Copyright (C) 2013-2014, Con Kolivas + * Copyright (C) 2005, 2007 Olivier Gay + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the project nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#include "config.h" + +#ifndef SHA2_H +#define SHA2_H + +#define SHA256_DIGEST_SIZE ( 256 / 8) +#define SHA256_BLOCK_SIZE ( 512 / 8) + +#define SHFR(x, n) (x >> n) +#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) +#define CH(x, y, z) ((x & y) ^ (~x & z)) +#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) + +#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) +#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) +#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) +#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) + +typedef struct { + unsigned int tot_len; + unsigned int len; + unsigned char block[2 * SHA256_BLOCK_SIZE]; + uint32_t h[8]; +} sha256_ctx; + +extern uint32_t sha256_k[64]; + +void sha256_init(sha256_ctx * ctx); +void sha256_update(sha256_ctx *ctx, const unsigned char *message, + unsigned int len); +void sha256_final(sha256_ctx *ctx, unsigned char *digest); +void sha256(const unsigned char *message, unsigned int len, + unsigned char *digest); + +#endif /* !SHA2_H */ diff --git a/solo-ckpool-source/src/sha256_code_release/open_software_license.txt b/solo-ckpool-source/src/sha256_code_release/open_software_license.txt new file mode 100644 index 0000000..44a2002 --- /dev/null +++ b/solo-ckpool-source/src/sha256_code_release/open_software_license.txt @@ -0,0 +1,32 @@ +Copyright (c) 2012, Intel Corporation + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +* Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. 
+ +* Neither the name of the Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + + +THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm b/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm new file mode 100644 index 0000000..7dcafaa --- /dev/null +++ b/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm @@ -0,0 +1,588 @@ +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Copyright (c) 2012, Intel Corporation +; +; All rights reserved. +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are +; met: +; +; * Redistributions of source code must retain the above copyright +; notice, this list of conditions and the following disclaimer. +; +; * Redistributions in binary form must reproduce the above copyright +; notice, this list of conditions and the following disclaimer in the +; documentation and/or other materials provided with the +; distribution. 
+; +; * Neither the name of the Intel Corporation nor the names of its +; contributors may be used to endorse or promote products derived from +; this software without specific prior written permission. +; +; +; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY +; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR +; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; Example YASM command lines: +; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_avx1.obj -g cv8 sha256_avx1.asm +; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_avx1.o sha256_avx1.asm +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; This code is described in an Intel White-Paper: +; "Fast SHA-256 Implementations on Intel Architecture Processors" +; +; To find it, surf to http://www.intel.com/p/en_US/embedded +; and search for that title. 
+; The paper is expected to be released roughly at the end of April, 2012 +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; This code schedules 1 blocks at a time, with 4 lanes per block +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define VMOVDQ vmovdqu ;; assume buffers not aligned + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros + +; addm [mem], reg +; Add reg to mem using reg-mem add and store +%macro addm 2 + add %2, %1 + mov %1, %2 +%endm + +%macro MY_ROR 2 + shld %1,%1,(32-(%2)) +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask +; Load xmm with mem and byte swap each dword +%macro COPY_XMM_AND_BSWAP 3 + VMOVDQ %1, %2 + vpshufb %1, %1, %3 +%endmacro + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define X0 xmm4 +%define X1 xmm5 +%define X2 xmm6 +%define X3 xmm7 + +%define XTMP0 xmm0 +%define XTMP1 xmm1 +%define XTMP2 xmm2 +%define XTMP3 xmm3 +%define XTMP4 xmm8 +%define XFER xmm9 +%define XTMP5 xmm11 + +%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA +%define SHUF_DC00 xmm12 ; shuffle xDxC -> DC00 +%define BYTE_FLIP_MASK xmm13 + +%ifdef LINUX +%define NUM_BLKS rdx ; 3rd arg +%define CTX rsi ; 2nd arg +%define INP rdi ; 1st arg + +%define SRND rdi ; clobbers INP +%define c ecx +%define d r8d +%define e edx +%else +%define NUM_BLKS r8 ; 3rd arg +%define CTX rdx ; 2nd arg +%define INP rcx ; 1st arg + +%define SRND rcx ; clobbers INP +%define c edi +%define d esi +%define e r8d + +%endif +%define TBL rbp +%define a eax +%define b ebx + +%define f r9d +%define g r10d +%define h r11d + +%define y0 r13d +%define y1 r14d +%define y2 r15d + + +_INP_END_SIZE equ 8 +_INP_SIZE equ 8 +_XFER_SIZE equ 8 +%ifdef LINUX +_XMM_SAVE_SIZE equ 0 +%else +_XMM_SAVE_SIZE equ 8*16 +%endif +; STACK_SIZE plus pushes must be an odd multiple of 8 +_ALIGN_SIZE equ 8 + +_INP_END equ 0 +_INP equ _INP_END + _INP_END_SIZE +_XFER equ _INP + _INP_SIZE +_XMM_SAVE equ _XFER + _XFER_SIZE + _ALIGN_SIZE 
+STACK_SIZE equ _XMM_SAVE + _XMM_SAVE_SIZE + +; rotate_Xs +; Rotate values of symbols X0...X3 +%macro rotate_Xs 0 +%xdefine X_ X0 +%xdefine X0 X1 +%xdefine X1 X2 +%xdefine X2 X3 +%xdefine X3 X_ +%endm + +; ROTATE_ARGS +; Rotate values of symbols a...h +%macro ROTATE_ARGS 0 +%xdefine TMP_ h +%xdefine h g +%xdefine g f +%xdefine f e +%xdefine e d +%xdefine d c +%xdefine c b +%xdefine b a +%xdefine a TMP_ +%endm + +%macro FOUR_ROUNDS_AND_SCHED 0 + ;; compute s0 four at a time and s1 two at a time + ;; compute W[-16] + W[-7] 4 at a time + ;vmovdqa XTMP0, X3 + mov y0, e ; y0 = e + MY_ROR y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + vpalignr XTMP0, X3, X2, 4 ; XTMP0 = W[-7] + MY_ROR y1, (22-13) ; y1 = a >> (22-13) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + ;vmovdqa XTMP1, X1 + xor y1, a ; y1 = a ^ (a >> (22-13) + xor y2, g ; y2 = f^g + vpaddd XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16] + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + ;; compute s0 + vpalignr XTMP1, X1, X0, 4 ; XTMP1 = W[-15] + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + + + MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH + + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + + vpsrld XTMP2, XTMP1, 7 + + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + + vpslld XTMP3, XTMP1, (32-7) + + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + + vpor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7 + + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + + mov y0, e ; y0 = e + mov y1, a ; y1 = a + + + MY_ROR y0, (25-11) ; y0 = e >> 
(25-11) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + MY_ROR y1, (22-13) ; y1 = a >> (22-13) + + vpsrld XTMP2, XTMP1,18 + + xor y1, a ; y1 = a ^ (a >> (22-13) + MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + xor y2, g ; y2 = f^g + + vpsrld XTMP4, XTMP1, 3 ; XTMP4 = W[-15] >> 3 + + MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + + vpslld XTMP1, XTMP1, (32-18) + + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + xor y2, g ; y2 = CH = ((f^g)&e)^g + + vpxor XTMP3, XTMP3, XTMP1 + + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH + MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + + vpxor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 + + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + + vpxor XTMP1, XTMP3, XTMP4 ; XTMP1 = s0 + + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + ;; compute low s1 + vpshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + vpaddd XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA} + + mov y0, e ; y0 = e + mov y1, a ; y1 = a + MY_ROR y0, (25-11) ; y0 = e >> (25-11) + + ;vmovdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA} + + xor y0, e ; y0 = e ^ (e >> (25-11)) + MY_ROR y1, (22-13) ; y1 = a >> (22-13) + mov y2, f ; y2 = f + xor y1, a ; y1 = a ^ (a >> (22-13) + MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + + vpsrld XTMP4, XTMP2, 10 ; XTMP4 = W[-2] >> 10 {BBAA} + + xor y2, g ; y2 = f^g + + vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xBxA} + + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + + vpsrlq 
XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xBxA} + + MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + xor y2, g ; y2 = CH = ((f^g)&e)^g + MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + vpxor XTMP2, XTMP2, XTMP3 + add y2, y0 ; y2 = S1 + CH + MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH + vpxor XTMP4, XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + vpshufb XTMP4, XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + vpaddd XTMP0, XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + ;; compute high s1 + vpshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC} + mov y0, e ; y0 = e + MY_ROR y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + ;vmovdqa XTMP5, XTMP2 ; XTMP5 = W[-2] {DDCC} + MY_ROR y1, (22-13) ; y1 = a >> (22-13) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + + vpsrld XTMP5, XTMP2, 10 ; XTMP5 = W[-2] >> 10 {DDCC} + + xor y1, a ; y1 = a ^ (a >> (22-13) + xor y2, g ; y2 = f^g + + vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xDxC} + + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + + vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xDxC} + + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + + vpxor XTMP2, XTMP2, XTMP3 + + MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 3*4] 
; y2 = k + w + S1 + CH + vpxor XTMP5, XTMP5, XTMP2 ; XTMP5 = s1 {xDxC} + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + vpshufb XTMP5, XTMP5, SHUF_DC00 ; XTMP5 = s1 {DC00} + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + vpaddd X0, XTMP5, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS +rotate_Xs +%endm + +;; input is [rsp + _XFER + %1 * 4] +%macro DO_ROUND 1 + mov y0, e ; y0 = e + MY_ROR y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + xor y0, e ; y0 = e ^ (e >> (25-11)) + MY_ROR y1, (22-13) ; y1 = a >> (22-13) + mov y2, f ; y2 = f + xor y1, a ; y1 = a ^ (a >> (22-13) + MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + xor y2, g ; y2 = f^g + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + and y2, e ; y2 = (f^g)&e + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + add y2, y0 ; y2 = S1 + CH + MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + ROTATE_ARGS +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; void sha256_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) +;; arg 1 : pointer to input data +;; arg 2 : pointer to digest +;; arg 3 : Num blocks +section .text +global 
sha256_avx +align 32 +sha256_avx: + push rbx +%ifndef LINUX + push rsi + push rdi +%endif + push rbp + push r13 + push r14 + push r15 + + sub rsp,STACK_SIZE +%ifndef LINUX + vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6 + vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7 + vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8 + vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9 + vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10 + vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11 + vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12 + vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13 +%endif + + shl NUM_BLKS, 6 ; convert to bytes + jz done_hash + add NUM_BLKS, INP ; pointer to end of data + mov [rsp + _INP_END], NUM_BLKS + + ;; load initial digest + mov a,[4*0 + CTX] + mov b,[4*1 + CTX] + mov c,[4*2 + CTX] + mov d,[4*3 + CTX] + mov e,[4*4 + CTX] + mov f,[4*5 + CTX] + mov g,[4*6 + CTX] + mov h,[4*7 + CTX] + + vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] + vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] + +loop0: + lea TBL,[K256 wrt rip] + + ;; byte swap first 16 dwords + COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK + + mov [rsp + _INP], INP + + ;; schedule 48 input dwords, by doing 3 rounds of 16 each + mov SRND, 3 +align 16 +loop1: + vpaddd XFER, X0, [TBL + 0*16] + vmovdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + vpaddd XFER, X0, [TBL + 1*16] + vmovdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + vpaddd XFER, X0, [TBL + 2*16] + vmovdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + vpaddd XFER, X0, [TBL + 3*16] + vmovdqa [rsp + _XFER], XFER + add TBL, 4*16 + FOUR_ROUNDS_AND_SCHED + + sub SRND, 1 + jne loop1 + + mov SRND, 2 +loop2: + vpaddd XFER, X0, [TBL + 0*16] + vmovdqa [rsp + _XFER], XFER + DO_ROUND 0 + DO_ROUND 1 + DO_ROUND 2 + DO_ROUND 3 + + vpaddd XFER, X1, [TBL + 1*16] + vmovdqa [rsp + _XFER], XFER + add TBL, 2*16 + DO_ROUND 0 + 
DO_ROUND 1 + DO_ROUND 2 + DO_ROUND 3 + + vmovdqa X0, X2 + vmovdqa X1, X3 + + sub SRND, 1 + jne loop2 + + + addm [4*0 + CTX],a + addm [4*1 + CTX],b + addm [4*2 + CTX],c + addm [4*3 + CTX],d + addm [4*4 + CTX],e + addm [4*5 + CTX],f + addm [4*6 + CTX],g + addm [4*7 + CTX],h + + mov INP, [rsp + _INP] + add INP, 64 + cmp INP, [rsp + _INP_END] + jne loop0 + +done_hash: +%ifndef LINUX + vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16] + vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16] + vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16] + vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16] + vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16] + vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16] + vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16] + vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16] +%endif + + + add rsp, STACK_SIZE + + pop r15 + pop r14 + pop r13 + pop rbp +%ifndef LINUX + pop rdi + pop rsi +%endif + pop rbx + + ret + + +section .data +align 64 +K256: + dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + +PSHUFFLE_BYTE_FLIP_MASK: ddq 0x0c0d0e0f08090a0b0405060700010203 + +; shuffle xBxA -> 00BA +_SHUF_00BA: ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100 + +; shuffle xDxC -> DC00 +_SHUF_DC00: ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF + +section .note.GNU-stack noalloc noexec nowrite progbits diff --git 
a/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm b/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm new file mode 100644 index 0000000..226867b --- /dev/null +++ b/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm @@ -0,0 +1,828 @@ +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Copyright (c) 2012, Intel Corporation +; +; All rights reserved. +; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are +; met: +; +; * Redistributions of source code must retain the above copyright +; notice, this list of conditions and the following disclaimer. +; +; * Redistributions in binary form must reproduce the above copyright +; notice, this list of conditions and the following disclaimer in the +; documentation and/or other materials provided with the +; distribution. +; +; * Neither the name of the Intel Corporation nor the names of its +; contributors may be used to endorse or promote products derived from +; this software without specific prior written permission. +; +; +; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY +; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR +; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; Example YASM command lines: +; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_avx2_rorx2.obj -g cv8 sha256_avx2_rorx2.asm +; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_avx2_rorx2.o sha256_avx2_rorx2.asm +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; This code is described in an Intel White-Paper: +; "Fast SHA-256 Implementations on Intel Architecture Processors" +; +; To find it, surf to http://www.intel.com/p/en_US/embedded +; and search for that title. +; The paper is expected to be released roughly at the end of April, 2012 +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; This code schedules 2 blocks at a time, with 4 lanes per block +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define VMOVDQ vmovdqu ;; assume buffers not aligned + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros + +; addm [mem], reg +; Add reg to mem using reg-mem add and store +%macro addm 2 + add %2, %1 + mov %1, %2 +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define X0 ymm4 +%define X1 ymm5 +%define X2 ymm6 +%define X3 ymm7 + +; XMM versions of above +%define XWORD0 xmm4 +%define XWORD1 xmm5 +%define XWORD2 xmm6 +%define XWORD3 xmm7 + +%define XTMP0 ymm0 +%define XTMP1 ymm1 +%define XTMP2 ymm2 +%define XTMP3 ymm3 +%define XTMP4 ymm8 +%define XFER ymm9 +%define XTMP5 ymm11 + +%define SHUF_00BA ymm10 ; shuffle xBxA -> 00BA +%define SHUF_DC00 ymm12 ; shuffle xDxC -> DC00 +%define BYTE_FLIP_MASK ymm13 + +%define X_BYTE_FLIP_MASK xmm13 ; XMM version of BYTE_FLIP_MASK + +%ifdef LINUX +%define NUM_BLKS rdx ; 3rd arg +%define CTX rsi ; 2nd arg +%define INP rdi ; 1st arg +%define c ecx +%define d r8d +%define e edx ; clobbers NUM_BLKS +%define y3 edi ; clobbers INP +%else +%define NUM_BLKS r8 ; 3rd arg +%define CTX rdx ; 2nd arg +%define INP rcx ; 1st arg +%define c edi +%define d esi +%define e r8d ; clobbers NUM_BLKS +%define 
y3 ecx ; clobbers INP + +%endif + + +%define TBL rbp +%define SRND CTX ; SRND is same register as CTX + +%define a eax +%define b ebx +%define f r9d +%define g r10d +%define h r11d +%define old_h r11d + +%define T1 r12d +%define y0 r13d +%define y1 r14d +%define y2 r15d + + +_XFER_SIZE equ 2*64*4 ; 2 blocks, 64 rounds, 4 bytes/round +%ifdef LINUX +_XMM_SAVE_SIZE equ 0 +%else +_XMM_SAVE_SIZE equ 8*16 +%endif +_INP_END_SIZE equ 8 +_INP_SIZE equ 8 +_CTX_SIZE equ 8 +_RSP_SIZE equ 8 + +_XFER equ 0 +_XMM_SAVE equ _XFER + _XFER_SIZE +_INP_END equ _XMM_SAVE + _XMM_SAVE_SIZE +_INP equ _INP_END + _INP_END_SIZE +_CTX equ _INP + _INP_SIZE +_RSP equ _CTX + _CTX_SIZE +STACK_SIZE equ _RSP + _RSP_SIZE + +; rotate_Xs +; Rotate values of symbols X0...X3 +%macro rotate_Xs 0 +%xdefine X_ X0 +%xdefine X0 X1 +%xdefine X1 X2 +%xdefine X2 X3 +%xdefine X3 X_ +%endm + +; ROTATE_ARGS +; Rotate values of symbols a...h +%macro ROTATE_ARGS 0 +%xdefine old_h h +%xdefine TMP_ h +%xdefine h g +%xdefine g f +%xdefine f e +%xdefine e d +%xdefine d c +%xdefine c b +%xdefine b a +%xdefine a TMP_ +%endm + +%macro FOUR_ROUNDS_AND_SCHED 1 +%define %%XFER %1 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + mov y3, a ; y3 = a ; MAJA + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + + add h, dword[%%XFER+0*4] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + vpalignr XTMP0, X3, X2, 4 ; XTMP0 = W[-7] + mov y2, f ; y2 = f ; CH + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + xor y2, g ; y2 = f^g ; CH + vpaddd XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1 + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + + and y2, e ; y2 = (f^g)&e ; CH + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + add d, h ; d = k + w + h + d ; -- + + and y3, b ; y3 = (a|c)&b ; MAJA + vpalignr XTMP1, X1, X0, 4 ; XTMP1 = W[-15] + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 
2) ; S0 + + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + vpsrld XTMP2, XTMP1, 7 + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and T1, c ; T1 = a&c ; MAJB + + add y2, y0 ; y2 = S1 + CH ; -- + vpslld XTMP3, XTMP1, (32-7) + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + vpor XTMP3, XTMP3, XTMP2 ; XTMP3 = W[-15] ror 7 + + vpsrld XTMP2, XTMP1,18 + add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + add h, y3 ; h = t1 + S0 + MAJ ; -- + + +ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + + mov y3, a ; y3 = a ; MAJA + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + add h, dword[%%XFER+1*4] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + + vpsrld XTMP4, XTMP1, 3 ; XTMP4 = W[-15] >> 3 + mov y2, f ; y2 = f ; CH + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + xor y2, g ; y2 = f^g ; CH + + + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + and y2, e ; y2 = (f^g)&e ; CH + add d, h ; d = k + w + h + d ; -- + + vpslld XTMP1, XTMP1, (32-18) + and y3, b ; y3 = (a|c)&b ; MAJA + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + + vpxor XTMP3, XTMP3, XTMP1 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + + vpxor XTMP3, XTMP3, XTMP2 ; XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + + vpxor XTMP1, XTMP3, XTMP4 ; XTMP1 = s0 + vpshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + vpaddd XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + add h, y2 ; h = k + w + h + S0 + S1 + CH 
= t1 + S0; -- + add h, y3 ; h = t1 + S0 + MAJ ; -- + + vpsrld XTMP4, XTMP2, 10 ; XTMP4 = W[-2] >> 10 {BBAA} + + +ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + mov y3, a ; y3 = a ; MAJA + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + add h, [%%XFER+2*4] ; h = k + w + h ; -- + + vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] ror 19 {xBxA} + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + or y3, c ; y3 = a|c ; MAJA + mov y2, f ; y2 = f ; CH + xor y2, g ; y2 = f^g ; CH + + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA} + and y2, e ; y2 = (f^g)&e ; CH + + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + vpxor XTMP2, XTMP2, XTMP3 + add d, h ; d = k + w + h + d ; -- + and y3, b ; y3 = (a|c)&b ; MAJA + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + vpxor XTMP4, XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + + vpshufb XTMP4, XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + vpaddd XTMP0, XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + vpshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} + + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + + add h, y3 ; h = t1 + S0 + MAJ ; -- + + +ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + mov y3, a ; y3 = a ; MAJA + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + add h, dword[%%XFER+3*4] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + + vpsrld XTMP5, XTMP2, 10 ; XTMP5 = W[-2] >> 10 {DDCC} + mov y2, f ; y2 = f ; CH + rorx T1, a, 13 
; T1 = a >> 13 ; S0B + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + xor y2, g ; y2 = f^g ; CH + + + vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] ror 19 {xDxC} + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + and y2, e ; y2 = (f^g)&e ; CH + add d, h ; d = k + w + h + d ; -- + and y3, b ; y3 = (a|c)&b ; MAJA + + vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC} + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + + vpxor XTMP2, XTMP2, XTMP3 + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + add y2, y0 ; y2 = S1 + CH ; -- + + vpxor XTMP5, XTMP5, XTMP2 ; XTMP5 = s1 {xDxC} + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + vpshufb XTMP5, XTMP5, SHUF_DC00 ; XTMP5 = s1 {DC00} + + vpaddd X0, XTMP5, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and T1, c ; T1 = a&c ; MAJB + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + + add h, y1 ; h = k + w + h + S0 ; -- + add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + add h, y3 ; h = t1 + S0 + MAJ ; -- + +ROTATE_ARGS +rotate_Xs +%endm + +%macro DO_4ROUNDS 1 +%define %%XFER %1 +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;; + + mov y2, f ; y2 = f ; CH + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + xor y2, g ; y2 = f^g ; CH + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + and y2, e ; y2 = (f^g)&e ; CH + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + mov y3, a ; y3 = a ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + add h, dword[%%XFER + 4*0] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and y3, b ; y3 = (a|c)&b 
; MAJA + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + + + add d, h ; d = k + w + h + d ; -- + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + + + ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + + ;add h, y3 ; h = t1 + S0 + MAJ ; -- + + ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;; + + add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + mov y2, f ; y2 = f ; CH + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + xor y2, g ; y2 = f^g ; CH + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + and y2, e ; y2 = (f^g)&e ; CH + add old_h, y3 ; h = t1 + S0 + MAJ ; -- + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + mov y3, a ; y3 = a ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + add h, dword[%%XFER + 4*1] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and y3, b ; y3 = (a|c)&b ; MAJA + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + + + add d, h ; d = k + w + h + d ; -- + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + + + ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + + ;add h, y3 ; h = t1 + S0 + MAJ ; -- + + ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + + add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + mov y2, f ; y2 = f ; CH + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + xor y2, g ; y2 = f^g ; CH + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + and y2, 
e ; y2 = (f^g)&e ; CH + add old_h, y3 ; h = t1 + S0 + MAJ ; -- + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + mov y3, a ; y3 = a ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + add h, dword[%%XFER + 4*2] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and y3, b ; y3 = (a|c)&b ; MAJA + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + + + add d, h ; d = k + w + h + d ; -- + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + + + ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + + ;add h, y3 ; h = t1 + S0 + MAJ ; -- + + ROTATE_ARGS + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;; + + add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + mov y2, f ; y2 = f ; CH + rorx y0, e, 25 ; y0 = e >> 25 ; S1A + rorx y1, e, 11 ; y1 = e >> 11 ; S1B + xor y2, g ; y2 = f^g ; CH + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 + rorx y1, e, 6 ; y1 = (e >> 6) ; S1 + and y2, e ; y2 = (f^g)&e ; CH + add old_h, y3 ; h = t1 + S0 + MAJ ; -- + + xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 + rorx T1, a, 13 ; T1 = a >> 13 ; S0B + xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH + rorx y1, a, 22 ; y1 = a >> 22 ; S0A + mov y3, a ; y3 = a ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 + rorx T1, a, 2 ; T1 = (a >> 2) ; S0 + add h, dword[%%XFER + 4*3] ; h = k + w + h ; -- + or y3, c ; y3 = a|c ; MAJA + + xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 + mov T1, a ; T1 = a ; MAJB + and y3, b ; y3 = (a|c)&b ; MAJA + and T1, c ; T1 = a&c ; MAJB + add y2, y0 ; y2 = S1 + CH ; -- + + + add d, h ; d = k + w + h + d ; -- + or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ + add h, y1 ; h = k + w + h + S0 ; -- + + add 
d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- + + + add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- + + add h, y3 ; h = t1 + S0 + MAJ ; -- + + ROTATE_ARGS + +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) +;; arg 1 : pointer to input data +;; arg 2 : pointer to digest +;; arg 3 : Num blocks +section .text +global sha256_rorx +align 32 +sha256_rorx: + push rbx +%ifndef LINUX + push rsi + push rdi +%endif + push rbp + push r12 + push r13 + push r14 + push r15 + + mov rax, rsp + sub rsp,STACK_SIZE + and rsp, -32 + mov [rsp + _RSP], rax + +%ifndef LINUX + vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6 + vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7 + vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8 + vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9 + vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10 + vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11 + vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12 + vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13 +%endif + + shl NUM_BLKS, 6 ; convert to bytes + jz done_hash + lea NUM_BLKS, [NUM_BLKS + INP - 64] ; pointer to last block + mov [rsp + _INP_END], NUM_BLKS + + cmp INP, NUM_BLKS + je only_one_block + + ;; load initial digest + mov a,[4*0 + CTX] + mov b,[4*1 + CTX] + mov c,[4*2 + CTX] + mov d,[4*3 + CTX] + mov e,[4*4 + CTX] + mov f,[4*5 + CTX] + mov g,[4*6 + CTX] + mov h,[4*7 + CTX] + + vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] + vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] + + mov [rsp + _CTX], CTX + +loop0: + lea TBL,[K256 wrt rip] + + ;; Load first 16 dwords from two blocks + VMOVDQ XTMP0, [INP + 0*32] + VMOVDQ XTMP1, [INP + 1*32] + VMOVDQ XTMP2, [INP + 2*32] + VMOVDQ XTMP3, [INP + 3*32] + + ;; byte swap data + vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK + vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK + vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK + vpshufb XTMP3, XTMP3, 
BYTE_FLIP_MASK + + ;; transpose data into high/low halves + vperm2i128 X0, XTMP0, XTMP2, 0x20 + vperm2i128 X1, XTMP0, XTMP2, 0x31 + vperm2i128 X2, XTMP1, XTMP3, 0x20 + vperm2i128 X3, XTMP1, XTMP3, 0x31 + +last_block_enter: + add INP, 64 + mov [rsp + _INP], INP + + ;; schedule 48 input dwords, by doing 3 rounds of 12 each + xor SRND, SRND + +align 16 +loop1: + vpaddd XFER, X0, [TBL + SRND + 0*32] + vmovdqa [rsp + _XFER + SRND + 0*32], XFER + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32 + + vpaddd XFER, X0, [TBL + SRND + 1*32] + vmovdqa [rsp + _XFER + SRND + 1*32], XFER + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32 + + vpaddd XFER, X0, [TBL + SRND + 2*32] + vmovdqa [rsp + _XFER + SRND + 2*32], XFER + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32 + + vpaddd XFER, X0, [TBL + SRND + 3*32] + vmovdqa [rsp + _XFER + SRND + 3*32], XFER + FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32 + + add SRND, 4*32 + cmp SRND, 3 * 4*32 + jb loop1 + +loop2: + ;; Do last 16 rounds with no scheduling + vpaddd XFER, X0, [TBL + SRND + 0*32] + vmovdqa [rsp + _XFER + SRND + 0*32], XFER + DO_4ROUNDS rsp + _XFER + SRND + 0*32 + vpaddd XFER, X1, [TBL + SRND + 1*32] + vmovdqa [rsp + _XFER + SRND + 1*32], XFER + DO_4ROUNDS rsp + _XFER + SRND + 1*32 + add SRND, 2*32 + + vmovdqa X0, X2 + vmovdqa X1, X3 + + cmp SRND, 4 * 4*32 + jb loop2 + + mov CTX, [rsp + _CTX] + mov INP, [rsp + _INP] + + addm [4*0 + CTX],a + addm [4*1 + CTX],b + addm [4*2 + CTX],c + addm [4*3 + CTX],d + addm [4*4 + CTX],e + addm [4*5 + CTX],f + addm [4*6 + CTX],g + addm [4*7 + CTX],h + + cmp INP, [rsp + _INP_END] + ja done_hash + + ;;;; Do second block using previously scheduled results + xor SRND, SRND +align 16 +loop3: + DO_4ROUNDS rsp + _XFER + SRND + 0*32 + 16 + DO_4ROUNDS rsp + _XFER + SRND + 1*32 + 16 + add SRND, 2*32 + cmp SRND, 4 * 4*32 + jb loop3 + + mov CTX, [rsp + _CTX] + mov INP, [rsp + _INP] + add INP, 64 + + addm [4*0 + CTX],a + addm [4*1 + CTX],b + addm [4*2 + CTX],c + addm [4*3 + CTX],d + addm [4*4 + CTX],e 
+ addm [4*5 + CTX],f + addm [4*6 + CTX],g + addm [4*7 + CTX],h + + cmp INP, [rsp + _INP_END] + jb loop0 + ja done_hash + +do_last_block: + ;;;; do last block + lea TBL,[K256 wrt rip] + + VMOVDQ XWORD0, [INP + 0*16] + VMOVDQ XWORD1, [INP + 1*16] + VMOVDQ XWORD2, [INP + 2*16] + VMOVDQ XWORD3, [INP + 3*16] + + vpshufb XWORD0, XWORD0, X_BYTE_FLIP_MASK + vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK + vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK + vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK + + jmp last_block_enter + +only_one_block: + + ;; load initial digest + mov a,[4*0 + CTX] + mov b,[4*1 + CTX] + mov c,[4*2 + CTX] + mov d,[4*3 + CTX] + mov e,[4*4 + CTX] + mov f,[4*5 + CTX] + mov g,[4*6 + CTX] + mov h,[4*7 + CTX] + + vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] + vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] + + mov [rsp + _CTX], CTX + jmp do_last_block + +done_hash: +%ifndef LINUX + vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16] + vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16] + vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16] + vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16] + vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16] + vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16] + vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16] + vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16] +%endif + + mov rsp, [rsp + _RSP] + + pop r15 + pop r14 + pop r13 + pop r12 + pop rbp +%ifndef LINUX + pop rdi + pop rsi +%endif + pop rbx + + ret + +section .data +align 64 +K256: + dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + dd 
0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + +PSHUFFLE_BYTE_FLIP_MASK: + ddq 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 + +; shuffle xBxA -> 00BA +_SHUF_00BA: + ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 + +; shuffle xDxC -> DC00 +_SHUF_DC00: + ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF + +section .note.GNU-stack noalloc noexec nowrite progbits diff --git a/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm b/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm new file mode 100644 index 0000000..2d828e1 --- /dev/null +++ b/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm @@ -0,0 +1,546 @@ +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; Copyright (c) 2012, Intel Corporation +; +; All rights reserved. 
+; +; Redistribution and use in source and binary forms, with or without +; modification, are permitted provided that the following conditions are +; met: +; +; * Redistributions of source code must retain the above copyright +; notice, this list of conditions and the following disclaimer. +; +; * Redistributions in binary form must reproduce the above copyright +; notice, this list of conditions and the following disclaimer in the +; documentation and/or other materials provided with the +; distribution. +; +; * Neither the name of the Intel Corporation nor the names of its +; contributors may be used to endorse or promote products derived from +; this software without specific prior written permission. +; +; +; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY +; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR +; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; Example YASM command lines: +; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_sse4.obj -g cv8 sha256_sse4.asm +; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_sse4.o sha256_sse4.asm +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; +; This code is described in an Intel White-Paper: +; "Fast SHA-256 Implementations on Intel Architecture Processors" +; +; To find it, surf to http://www.intel.com/p/en_US/embedded +; and search for that title. +; The paper is expected to be released roughly at the end of April, 2012 +; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +; This code schedules 1 blocks at a time, with 4 lanes per block +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define MOVDQ movdqu ;; assume buffers not aligned + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros + +; addm [mem], reg +; Add reg to mem using reg-mem add and store +%macro addm 2 + add %2, %1 + mov %1, %2 +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask +; Load xmm with mem and byte swap each dword +%macro COPY_XMM_AND_BSWAP 3 + MOVDQ %1, %2 + pshufb %1, %3 +%endmacro + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +%define X0 xmm4 +%define X1 xmm5 +%define X2 xmm6 +%define X3 xmm7 + +%define XTMP0 xmm0 +%define XTMP1 xmm1 +%define XTMP2 xmm2 +%define XTMP3 xmm3 +%define XTMP4 xmm8 +%define XFER xmm9 + +%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA +%define SHUF_DC00 xmm11 ; shuffle xDxC -> DC00 +%define BYTE_FLIP_MASK xmm12 + +%ifdef LINUX +%define NUM_BLKS rdx ; 3rd arg +%define CTX rsi ; 2nd arg +%define INP rdi ; 1st arg + +%define SRND rdi ; clobbers INP +%define c ecx +%define d r8d +%define e edx +%else +%define NUM_BLKS r8 ; 3rd arg +%define CTX rdx ; 2nd arg +%define INP rcx ; 1st arg + +%define SRND rcx ; clobbers INP +%define c edi +%define d esi +%define e r8d + +%endif +%define TBL rbp 
+%define a eax +%define b ebx + +%define f r9d +%define g r10d +%define h r11d + +%define y0 r13d +%define y1 r14d +%define y2 r15d + + + +_INP_END_SIZE equ 8 +_INP_SIZE equ 8 +_XFER_SIZE equ 8 +%ifdef LINUX +_XMM_SAVE_SIZE equ 0 +%else +_XMM_SAVE_SIZE equ 7*16 +%endif +; STACK_SIZE plus pushes must be an odd multiple of 8 +_ALIGN_SIZE equ 8 + +_INP_END equ 0 +_INP equ _INP_END + _INP_END_SIZE +_XFER equ _INP + _INP_SIZE +_XMM_SAVE equ _XFER + _XFER_SIZE + _ALIGN_SIZE +STACK_SIZE equ _XMM_SAVE + _XMM_SAVE_SIZE + +; rotate_Xs +; Rotate values of symbols X0...X3 +%macro rotate_Xs 0 +%xdefine X_ X0 +%xdefine X0 X1 +%xdefine X1 X2 +%xdefine X2 X3 +%xdefine X3 X_ +%endm + +; ROTATE_ARGS +; Rotate values of symbols a...h +%macro ROTATE_ARGS 0 +%xdefine TMP_ h +%xdefine h g +%xdefine g f +%xdefine f e +%xdefine e d +%xdefine d c +%xdefine c b +%xdefine b a +%xdefine a TMP_ +%endm + +%macro FOUR_ROUNDS_AND_SCHED 0 + ;; compute s0 four at a time and s1 two at a time + ;; compute W[-16] + W[-7] 4 at a time + movdqa XTMP0, X3 + mov y0, e ; y0 = e + ror y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + palignr XTMP0, X2, 4 ; XTMP0 = W[-7] + ror y1, (22-13) ; y1 = a >> (22-13) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + movdqa XTMP1, X1 + xor y1, a ; y1 = a ^ (a >> (22-13) + xor y2, g ; y2 = f^g + paddd XTMP0, X0 ; XTMP0 = W[-7] + W[-16] + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + ;; compute s0 + palignr XTMP1, X0, 4 ; XTMP1 = W[-15] + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + movdqa XTMP2, XTMP1 ; XTMP2 = W[-15] + ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH + movdqa XTMP3, XTMP1 ; XTMP3 = W[-15] + mov y0, a ; y0 = a + add h, y2 ; h 
= h + S1 + CH + k + w + mov y2, a ; y2 = a + pslld XTMP1, (32-7) + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + psrld XTMP2, 7 + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + por XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + movdqa XTMP2, XTMP3 ; XTMP2 = W[-15] + mov y0, e ; y0 = e + mov y1, a ; y1 = a + movdqa XTMP4, XTMP3 ; XTMP4 = W[-15] + ror y0, (25-11) ; y0 = e >> (25-11) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + ror y1, (22-13) ; y1 = a >> (22-13) + pslld XTMP3, (32-18) + xor y1, a ; y1 = a ^ (a >> (22-13) + ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + xor y2, g ; y2 = f^g + psrld XTMP2, 18 + ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + pxor XTMP1, XTMP3 + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + xor y2, g ; y2 = CH = ((f^g)&e)^g + psrld XTMP4, 3 ; XTMP4 = W[-15] >> 3 + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH + ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + pxor XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + pxor XTMP1, XTMP4 ; XTMP1 = s0 + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + ;; compute low s1 + pshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + paddd XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA} + mov y0, e ; y0 = e + mov y1, a ; y1 = a + ror y0, (25-11) ; y0 = e >> (25-11) + movdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA} + xor y0, e ; 
y0 = e ^ (e >> (25-11)) + ror y1, (22-13) ; y1 = a >> (22-13) + mov y2, f ; y2 = f + xor y1, a ; y1 = a ^ (a >> (22-13) + ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA} + xor y2, g ; y2 = f^g + psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xBxA} + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + psrld XTMP4, 10 ; XTMP4 = W[-2] >> 10 {BBAA} + ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + xor y2, g ; y2 = CH = ((f^g)&e)^g + ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + pxor XTMP2, XTMP3 + add y2, y0 ; y2 = S1 + CH + ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH + pxor XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + pshufb XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + paddd XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + ;; compute high s1 + pshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS + movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC} + mov y0, e ; y0 = e + ror y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + movdqa X0, XTMP2 ; X0 = W[-2] {DDCC} + ror y1, (22-13) ; y1 = a >> (22-13) + xor y0, e ; y0 = e ^ (e >> (25-11)) + mov y2, f ; y2 = f + ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC} + xor y1, a ; y1 = a ^ (a >> (22-13) + xor y2, g ; y2 = f^g + psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xDxC} + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + and y2, e ; y2 = (f^g)&e + ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + psrld X0, 10 ; X0 = W[-2] >> 10 {DDCC} + xor y1, a ; y1 = a ^ (a >> 
(13-2)) ^ (a >> (22-2)) + ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + pxor XTMP2, XTMP3 + ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, y0 ; y2 = S1 + CH + add y2, [rsp + _XFER + 3*4] ; y2 = k + w + S1 + CH + pxor X0, XTMP2 ; X0 = s1 {xDxC} + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + pshufb X0, SHUF_DC00 ; X0 = s1 {DC00} + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + paddd X0, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + +ROTATE_ARGS +rotate_Xs +%endm + +;; input is [rsp + _XFER + %1 * 4] +%macro DO_ROUND 1 + mov y0, e ; y0 = e + ror y0, (25-11) ; y0 = e >> (25-11) + mov y1, a ; y1 = a + xor y0, e ; y0 = e ^ (e >> (25-11)) + ror y1, (22-13) ; y1 = a >> (22-13) + mov y2, f ; y2 = f + xor y1, a ; y1 = a ^ (a >> (22-13) + ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) + xor y2, g ; y2 = f^g + xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) + ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) + and y2, e ; y2 = (f^g)&e + xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) + ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) + xor y2, g ; y2 = CH = ((f^g)&e)^g + add y2, y0 ; y2 = S1 + CH + ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) + add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH + mov y0, a ; y0 = a + add h, y2 ; h = h + S1 + CH + k + w + mov y2, a ; y2 = a + or y0, c ; y0 = a|c + add d, h ; d = d + h + S1 + CH + k + w + and y2, c ; y2 = a&c + and y0, b ; y0 = (a|c)&b + add h, y1 ; h = h + S1 + CH + k + w + S0 + or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) + add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ + ROTATE_ARGS +%endm + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 
+;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks) +;; arg 1 : pointer to input data +;; arg 2 : pointer to digest +;; arg 3 : Num blocks +section .text +global sha256_sse4 +align 32 +sha256_sse4: + push rbx +%ifndef LINUX + push rsi + push rdi +%endif + push rbp + push r13 + push r14 + push r15 + + sub rsp,STACK_SIZE +%ifndef LINUX + movdqa [rsp + _XMM_SAVE + 0*16],xmm6 + movdqa [rsp + _XMM_SAVE + 1*16],xmm7 + movdqa [rsp + _XMM_SAVE + 2*16],xmm8 + movdqa [rsp + _XMM_SAVE + 3*16],xmm9 + movdqa [rsp + _XMM_SAVE + 4*16],xmm10 + movdqa [rsp + _XMM_SAVE + 5*16],xmm11 + movdqa [rsp + _XMM_SAVE + 6*16],xmm12 +%endif + + shl NUM_BLKS, 6 ; convert to bytes + jz done_hash + add NUM_BLKS, INP ; pointer to end of data + mov [rsp + _INP_END], NUM_BLKS + + ;; load initial digest + mov a,[4*0 + CTX] + mov b,[4*1 + CTX] + mov c,[4*2 + CTX] + mov d,[4*3 + CTX] + mov e,[4*4 + CTX] + mov f,[4*5 + CTX] + mov g,[4*6 + CTX] + mov h,[4*7 + CTX] + + movdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] + movdqa SHUF_00BA, [_SHUF_00BA wrt rip] + movdqa SHUF_DC00, [_SHUF_DC00 wrt rip] + +loop0: + lea TBL,[K256 wrt rip] + + ;; byte swap first 16 dwords + COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK + COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK + + mov [rsp + _INP], INP + + ;; schedule 48 input dwords, by doing 3 rounds of 16 each + mov SRND, 3 +align 16 +loop1: + movdqa XFER, [TBL + 0*16] + paddd XFER, X0 + movdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + movdqa XFER, [TBL + 1*16] + paddd XFER, X0 + movdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + movdqa XFER, [TBL + 2*16] + paddd XFER, X0 + movdqa [rsp + _XFER], XFER + FOUR_ROUNDS_AND_SCHED + + movdqa XFER, [TBL + 3*16] + paddd XFER, X0 + movdqa [rsp + _XFER], XFER + add TBL, 4*16 + FOUR_ROUNDS_AND_SCHED + + sub SRND, 1 + jne loop1 + + mov SRND, 2 +loop2: + paddd X0, [TBL + 0*16] 
+ movdqa [rsp + _XFER], X0 + DO_ROUND 0 + DO_ROUND 1 + DO_ROUND 2 + DO_ROUND 3 + paddd X1, [TBL + 1*16] + movdqa [rsp + _XFER], X1 + add TBL, 2*16 + DO_ROUND 0 + DO_ROUND 1 + DO_ROUND 2 + DO_ROUND 3 + + movdqa X0, X2 + movdqa X1, X3 + + sub SRND, 1 + jne loop2 + + addm [4*0 + CTX],a + addm [4*1 + CTX],b + addm [4*2 + CTX],c + addm [4*3 + CTX],d + addm [4*4 + CTX],e + addm [4*5 + CTX],f + addm [4*6 + CTX],g + addm [4*7 + CTX],h + + mov INP, [rsp + _INP] + add INP, 64 + cmp INP, [rsp + _INP_END] + jne loop0 + +done_hash: +%ifndef LINUX + movdqa xmm6,[rsp + _XMM_SAVE + 0*16] + movdqa xmm7,[rsp + _XMM_SAVE + 1*16] + movdqa xmm8,[rsp + _XMM_SAVE + 2*16] + movdqa xmm9,[rsp + _XMM_SAVE + 3*16] + movdqa xmm10,[rsp + _XMM_SAVE + 4*16] + movdqa xmm11,[rsp + _XMM_SAVE + 5*16] + movdqa xmm12,[rsp + _XMM_SAVE + 6*16] +%endif + + add rsp, STACK_SIZE + + pop r15 + pop r14 + pop r13 + pop rbp +%ifndef LINUX + pop rdi + pop rsi +%endif + pop rbx + + ret + + +section .data +align 64 +K256: + dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 + dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 + dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 + dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 + dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc + dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da + dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 + dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 + dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 + dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 + dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 + dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 + dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 + dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 + dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 + dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 + +PSHUFFLE_BYTE_FLIP_MASK: ddq 0x0c0d0e0f08090a0b0405060700010203 + +; shuffle xBxA -> 00BA +_SHUF_00BA: ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100 + +; shuffle xDxC -> DC00 +_SHUF_DC00: ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF + 
+section .note.GNU-stack noalloc noexec nowrite progbits diff --git a/solo-ckpool-source/src/stratifier.c b/solo-ckpool-source/src/stratifier.c new file mode 100644 index 0000000..0d91852 --- /dev/null +++ b/solo-ckpool-source/src/stratifier.c @@ -0,0 +1,8617 @@ +/* + * Copyright 2014-2020,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#include "config.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef HAVE_ZMQ_H +#include +#endif + +#include "ckpool.h" +#include "libckpool.h" +#include "bitcoin.h" +#include "sha2.h" +#include "stratifier.h" +#include "uthash.h" +#include "utlist.h" +#include "connector.h" +#include "generator.h" + +/* Consistent across all pool instances */ +static const char *workpadding = "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"; +static const char *scriptsig_header = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff"; +static uchar scriptsig_header_bin[41]; +static const double nonces = 4294967296; + +/* Add unaccounted shares when they arrive, remove them with each update of + * rolling stats. 
*/ +struct pool_stats { + tv_t start_time; + ts_t last_update; + + int workers; + int users; + int disconnected; + + int remote_workers; + int remote_users; + + /* Absolute shares stats */ + int64_t unaccounted_shares; + int64_t accounted_shares; + + /* Cycle of 32 to determine which users to dump stats on */ + uint8_t userstats_cycle; + + /* Shares per second for 1/5/15/60 minute rolling averages */ + double sps1; + double sps5; + double sps15; + double sps60; + + /* Diff shares stats */ + int64_t unaccounted_diff_shares; + int64_t accounted_diff_shares; + int64_t unaccounted_rejects; + int64_t accounted_rejects; + + /* Diff shares per second for 1/5/15... minute rolling averages */ + double dsps1; + double dsps5; + double dsps15; + double dsps60; + double dsps360; + double dsps1440; + double dsps10080; + + double network_diff; + double best_diff; +}; + +typedef struct pool_stats pool_stats_t; + +typedef struct genwork workbase_t; + +struct json_params { + json_t *method; + json_t *params; + json_t *id_val; + int64_t client_id; +}; + +typedef struct json_params json_params_t; + +/* Stratum json messages with their associated client id */ +struct smsg { + json_t *json_msg; + int64_t client_id; +}; + +typedef struct smsg smsg_t; + +struct userwb { + UT_hash_handle hh; + int64_t id; + + uchar *coinb2bin; // Coinb2 cointaining this user's address for generation + char *coinb2; + int coinb2len; // Length of user coinb2 +}; + +struct user_instance; +struct worker_instance; +struct stratum_instance; + +typedef struct user_instance user_instance_t; +typedef struct worker_instance worker_instance_t; +typedef struct stratum_instance stratum_instance_t; + +struct user_instance { + UT_hash_handle hh; + char username[128]; + int id; + char *secondaryuserid; + bool btcaddress; + bool script; + bool segwit; + + /* A linked list of all connected instances of this user */ + stratum_instance_t *clients; + + /* A linked list of all connected workers of this user */ + 
worker_instance_t *worker_instances; + + int workers; + int remote_workers; + char txnbin[48]; + int txnlen; + struct userwb *userwbs; /* Protected by instance lock */ + + double best_diff; /* Best share found by this user */ + int64_t best_ever; /* Best share ever found by this user */ + + int64_t shares; + + int64_t uadiff; /* Shares not yet accounted for in hashmeter */ + + double dsps1; /* Diff shares per second, 1 minute rolling average */ + double dsps5; /* ... 5 minute ... */ + double dsps60;/* etc */ + double dsps1440; + double dsps10080; + tv_t last_share; + tv_t last_decay; + + bool authorised; /* Has this username ever been authorised? */ + time_t auth_time; + time_t failed_authtime; /* Last time this username failed to authorise */ + int auth_backoff; /* How long to reject any auth attempts since last failure */ + bool throttled; /* Have we begun rejecting auth attempts */ +}; + +/* Combined data from workers with the same workername */ +struct worker_instance { + user_instance_t *user_instance; + char *workername; + + /* Number of stratum instances attached as this one worker */ + int instance_count; + + worker_instance_t *next; + worker_instance_t *prev; + + int64_t shares; + + int64_t uadiff; /* Shares not yet accounted for in hashmeter */ + + double dsps1; + double dsps5; + double dsps60; + double dsps1440; + double dsps10080; + tv_t last_share; + tv_t last_decay; + time_t start_time; + + double best_diff; /* Best share found by this worker */ + int64_t best_ever; /* Best share ever found by this worker */ + double mindiff; /* User chosen mindiff */ + + bool idle; + bool notified_idle; +}; + +typedef struct stratifier_data sdata_t; + +typedef struct proxy_base proxy_t; + +/* Per client stratum instance == workers */ +struct stratum_instance { + UT_hash_handle hh; + int64_t id; + + /* Virtualid used as unique local id for passthrough clients */ + int64_t virtualid; + + stratum_instance_t *recycled_next; + stratum_instance_t *recycled_prev; + + 
stratum_instance_t *user_next; + stratum_instance_t *user_prev; + + stratum_instance_t *node_next; + stratum_instance_t *node_prev; + + stratum_instance_t *remote_next; + stratum_instance_t *remote_prev; + + /* Descriptive of ID number and passthrough if any */ + char identity[128]; + + /* Reference count for when this instance is used outside of the + * instance_lock */ + int ref; + + char enonce1[36]; /* Fit up to 16 byte binary enonce1 */ + uchar enonce1bin[16]; + char enonce1var[20]; /* Fit up to 8 byte binary enonce1var */ + uint64_t enonce1_64; + int session_id; + + double diff; /* Current diff */ + double old_diff; /* Previous diff */ + int64_t diff_change_job_id; /* Last job_id we changed diff */ + + int64_t uadiff; /* Shares not yet accounted for in hashmeter */ + + double dsps1; /* Diff shares per second, 1 minute rolling average */ + double dsps5; /* ... 5 minute ... */ + double dsps60;/* etc */ + double dsps1440; + double dsps10080; + tv_t ldc; /* Last diff change */ + int ssdc; /* Shares since diff change */ + tv_t first_share; + tv_t last_share; + tv_t last_decay; + time_t first_invalid; /* Time of first invalid in run of non stale rejects */ + time_t upstream_invalid; /* As first_invalid but for upstream responses */ + time_t start_time; + + char address[INET6_ADDRSTRLEN]; + bool node; /* Is this a mining node */ + bool subscribed; + bool authorising; /* In progress, protected by instance_lock */ + bool authorised; + bool dropped; + bool idle; + int reject; /* Indicator that this client is having a run of rejects + * or other problem and should be dropped lazily if + * this is set to 2 */ + + int latency; /* Latency when on a mining node */ + + bool reconnect; /* This client really needs to reconnect */ + time_t reconnect_request; /* The time we sent a reconnect message */ + + user_instance_t *user_instance; + worker_instance_t *worker_instance; + + char *useragent; + char *workername; + char *password; + bool messages; /* Is this a client that 
understands stratum messages */ + int user_id; + int server; /* Which server is this instance bound to */ + + ckpool_t *ckp; + + time_t last_txns; /* Last time this worker requested txn hashes */ + time_t disconnected_time; /* Time this instance disconnected */ + + double suggest_diff; /* Stratum client suggested diff */ + double best_diff; /* Best share found by this instance */ + + sdata_t *sdata; /* Which sdata this client is bound to */ + proxy_t *proxy; /* Proxy this is bound to in proxy mode */ + int proxyid; /* Which proxy id */ + int subproxyid; /* Which subproxy */ + + bool passthrough; /* Is this a passthrough */ + bool trusted; /* Is this a trusted remote server */ + bool remote; /* Is this a remote client on a trusted remote server */ +}; + +struct share { + UT_hash_handle hh; + uchar hash[32]; + int64_t workbase_id; +}; + +typedef struct share share_t; + +struct proxy_base { + UT_hash_handle hh; + UT_hash_handle sh; /* For subproxy hashlist */ + proxy_t *next; /* For retired subproxies */ + proxy_t *prev; + int id; + int subid; + + /* Priority has the user id encoded in the high bits if it's not a + * global proxy. 
*/ + int64_t priority; + + bool global; /* Is this a global proxy */ + int userid; /* Userid for non global proxies */ + + double diff; + + char baseurl[128]; + char url[128]; + char auth[128]; + char pass[128]; + char enonce1[32]; + uchar enonce1bin[16]; + int enonce1constlen; + int enonce1varlen; + + int nonce2len; + int enonce2varlen; + + bool subscribed; + bool notified; + + int64_t clients; /* Incrementing client count */ + int64_t max_clients; /* Maximum number of clients per subproxy */ + int64_t bound_clients; /* Currently actively bound clients */ + int64_t combined_clients; /* Total clients of all subproxies of a parent proxy */ + int64_t headroom; /* Temporary variable when calculating how many more clients can bind */ + + int subproxy_count; /* Number of subproxies */ + proxy_t *parent; /* Parent proxy of each subproxy */ + proxy_t *subproxies; /* Hashlist of subproxies sorted by subid */ + sdata_t *sdata; /* Unique stratifer data for each subproxy */ + bool dead; + bool deleted; +}; + +typedef struct session session_t; + +struct session { + UT_hash_handle hh; + int session_id; + uint64_t enonce1_64; + int64_t client_id; + int userid; + time_t added; + char address[INET6_ADDRSTRLEN]; +}; + +typedef struct txntable txntable_t; + +struct txntable { + UT_hash_handle hh; + int id; + char hash[68]; + char *data; + int refcount; + bool seen; +}; + +#define ID_AUTH 0 +#define ID_WORKINFO 1 +#define ID_AGEWORKINFO 2 +#define ID_SHARES 3 +#define ID_SHAREERR 4 +#define ID_POOLSTATS 5 +#define ID_WORKERSTATS 6 +#define ID_BLOCK 7 +#define ID_ADDRAUTH 8 +#define ID_HEARTBEAT 9 + +struct stratifier_data { + ckpool_t *ckp; + + char txnbin[48]; + int txnlen; + char dontxnbin[48]; + int dontxnlen; + + pool_stats_t stats; + /* Protects changes to pool stats */ + mutex_t stats_lock; + /* Protects changes to unaccounted pool stats */ + mutex_t uastats_lock; + + bool verbose; + + uint64_t enonce1_64; + + /* For protecting the txntable data */ + cklock_t txn_lock; + + /* 
For protecting the hashtable data */ + cklock_t workbase_lock; + + /* For the hashtable of all workbases */ + workbase_t *workbases; + workbase_t *current_workbase; + int workbases_generated; + txntable_t *txns; + int64_t txns_generated; + + /* Workbases from remote trusted servers */ + workbase_t *remote_workbases; + + /* Is this a node and unable to rebuild workinfos due to lack of txns */ + bool wbincomplete; + + /* Semaphore to serialise calls to add_base */ + sem_t update_sem; + /* Time we last sent out a stratum update */ + time_t update_time; + + int64_t workbase_id; + int64_t blockchange_id; + int session_id; + char lasthash[68]; + char lastswaphash[68]; + + ckmsgq_t *updateq; // Generator base work updates + ckmsgq_t *ssends; // Stratum sends + ckmsgq_t *srecvs; // Stratum receives + ckmsgq_t *sshareq; // Stratum share sends + ckmsgq_t *sauthq; // Stratum authorisations + ckmsgq_t *stxnq; // Transaction requests + + int user_instance_id; + + stratum_instance_t *stratum_instances; + stratum_instance_t *recycled_instances; + stratum_instance_t *node_instances; + stratum_instance_t *remote_instances; + + int64_t stratum_generated; + int64_t disconnected_generated; + int64_t userwbs_generated; + session_t *disconnected_sessions; + + user_instance_t *user_instances; + + /* Protects both stratum and user instances */ + cklock_t instance_lock; + + share_t *shares; + mutex_t share_lock; + + int64_t shares_generated; + + int proxy_count; /* Total proxies generated (not necessarily still alive) */ + proxy_t *proxy; /* Current proxy in use */ + proxy_t *proxies; /* Hashlist of all proxies */ + mutex_t proxy_lock; /* Protects all proxy data */ + proxy_t *subproxy; /* Which subproxy this sdata belongs to in proxy mode */ +}; + +typedef struct json_entry json_entry_t; + +struct json_entry { + json_entry_t *next; + json_entry_t *prev; + json_t *val; +}; + +/* Priority levels for generator messages */ +#define GEN_LAX 0 +#define GEN_NORMAL 1 +#define GEN_PRIORITY 2 + +/* 
For storing a set of messages within another lock, allowing us to dump them + * to the log outside of lock */ +static void add_msg_entry(char_entry_t **entries, char **buf) +{ + char_entry_t *entry; + + if (!*buf) + return; + entry = ckalloc(sizeof(char_entry_t)); + entry->buf = *buf; + *buf = NULL; + DL_APPEND(*entries, entry); +} + +static void notice_msg_entries(char_entry_t **entries) +{ + char_entry_t *entry, *tmpentry; + + DL_FOREACH_SAFE(*entries, entry, tmpentry) { + DL_DELETE(*entries, entry); + LOGNOTICE("%s", entry->buf); + free(entry->buf); + free(entry); + } +} + +static void info_msg_entries(char_entry_t **entries) +{ + char_entry_t *entry, *tmpentry; + + DL_FOREACH_SAFE(*entries, entry, tmpentry) { + DL_DELETE(*entries, entry); + LOGINFO("%s", entry->buf); + free(entry->buf); + free(entry); + } +} + +static const int witnessdata_size = 36; // commitment header + hash + +static void generate_coinbase(ckpool_t *ckp, workbase_t *wb) +{ + uint64_t *u64, g64, d64 = 0; + sdata_t *sdata = ckp->sdata; + char header[272]; + int len, ofs = 0; + ts_t now; + + /* Set fixed length coinb1 arrays to be more than enough */ + wb->coinb1 = ckzalloc(256); + wb->coinb1bin = ckzalloc(128); + + /* Strings in wb should have been zero memset prior. 
Generate binary + * templates first, then convert to hex */ + memcpy(wb->coinb1bin, scriptsig_header_bin, 41); + ofs += 41; // Fixed header length; + + ofs++; // Script length is filled in at the end @wb->coinb1bin[41]; + + /* Put block height at start of template */ + len = ser_number(wb->coinb1bin + ofs, wb->height); + ofs += len; + + /* Followed by flag */ + len = strlen(wb->flags) / 2; + wb->coinb1bin[ofs++] = len; + hex2bin(wb->coinb1bin + ofs, wb->flags, len); + ofs += len; + + /* Followed by timestamp */ + ts_realtime(&now); + len = ser_number(wb->coinb1bin + ofs, now.tv_sec); + ofs += len; + + /* Followed by our unique randomiser based on the nsec timestamp */ + len = ser_number(wb->coinb1bin + ofs, now.tv_nsec); + ofs += len; + + wb->enonce1varlen = ckp->nonce1length; + wb->enonce2varlen = ckp->nonce2length; + wb->coinb1bin[ofs++] = wb->enonce1varlen + wb->enonce2varlen; + + wb->coinb1len = ofs; + + len = wb->coinb1len - 41; + + len += wb->enonce1varlen; + len += wb->enonce2varlen; + + wb->coinb2bin = ckzalloc(512); + memcpy(wb->coinb2bin, "\x0a\x63\x6b\x70\x6f\x6f\x6c", 7); + wb->coinb2len = 7; + if (ckp->btcsig) { + int siglen = strlen(ckp->btcsig); + + LOGDEBUG("Len %d sig %s", siglen, ckp->btcsig); + if (siglen) { + wb->coinb2bin[wb->coinb2len++] = siglen; + memcpy(wb->coinb2bin + wb->coinb2len, ckp->btcsig, siglen); + wb->coinb2len += siglen; + } + } + len += wb->coinb2len; + + wb->coinb1bin[41] = len - 1; /* Set the length now */ + __bin2hex(wb->coinb1, wb->coinb1bin, wb->coinb1len); + LOGDEBUG("Coinb1: %s", wb->coinb1); + /* Coinbase 1 complete */ + + memcpy(wb->coinb2bin + wb->coinb2len, "\xff\xff\xff\xff", 4); + wb->coinb2len += 4; + + // Generation value + g64 = wb->coinbasevalue; + if (ckp->donvalid && ckp->donation > 0) { + double dbl64 = (double)g64 / 100 * ckp->donation; + + d64 = dbl64; + g64 -= d64; // To guarantee integers add up to the original coinbasevalue + wb->coinb2bin[wb->coinb2len++] = 2 + wb->insert_witness; + } else + 
wb->coinb2bin[wb->coinb2len++] = 1 + wb->insert_witness; + + u64 = (uint64_t *)&wb->coinb2bin[wb->coinb2len]; + *u64 = htole64(g64); + wb->coinb2len += 8; + + /* Coinb2 address goes here, takes up 23~25 bytes + 1 byte for length */ + + wb->coinb3len = 0; + wb->coinb3bin = ckzalloc(256 + wb->insert_witness * (8 + witnessdata_size + 2)); + + if (ckp->donvalid && ckp->donation > 0) { + u64 = (uint64_t *)wb->coinb3bin; + *u64 = htole64(d64); + wb->coinb3len += 8; + + wb->coinb3bin[wb->coinb3len++] = sdata->dontxnlen; + memcpy(wb->coinb3bin + wb->coinb3len, sdata->dontxnbin, sdata->dontxnlen); + wb->coinb3len += sdata->dontxnlen; + } else + ckp->donation = 0; + + if (wb->insert_witness) { + // 0 value + wb->coinb3len += 8; + + wb->coinb3bin[wb->coinb3len++] = witnessdata_size + 2; // total scriptPubKey size + wb->coinb3bin[wb->coinb3len++] = 0x6a; // OP_RETURN + wb->coinb3bin[wb->coinb3len++] = witnessdata_size; + + hex2bin(&wb->coinb3bin[wb->coinb3len], wb->witnessdata, witnessdata_size); + wb->coinb3len += witnessdata_size; + } + + wb->coinb3len += 4; // Blank lock + + if (!ckp->btcsolo) { + int coinbase_len, offset = 0; + char *coinbase, *cb; + json_t *val = NULL; + + /* Append the generation address and coinb3 in !solo mode */ + wb->coinb2bin[wb->coinb2len++] = sdata->txnlen; + memcpy(wb->coinb2bin + wb->coinb2len, sdata->txnbin, sdata->txnlen); + wb->coinb2len += sdata->txnlen; + memcpy(wb->coinb2bin + wb->coinb2len, wb->coinb3bin, wb->coinb3len); + wb->coinb2len += wb->coinb3len; + wb->coinb3len = 0; + dealloc(wb->coinb3bin); + + /* Set this only once */ + if (unlikely(!ckp->coinbase_valid)) { + /* We have enough to test the validity of the coinbase here */ + coinbase_len = wb->coinb1len + ckp->nonce1length + ckp->nonce2length + wb->coinb2len; + coinbase = ckzalloc(coinbase_len); + memcpy(coinbase, wb->coinb1bin, wb->coinb1len); + offset += wb->coinb1len; + /* Space for nonce1 and 2 */ + offset += ckp->nonce1length + ckp->nonce2length; + memcpy(coinbase + offset, 
wb->coinb2bin, wb->coinb2len); + offset += wb->coinb2len; + cb = bin2hex(coinbase, offset); + LOGDEBUG("Coinbase txn %s", cb); + free(coinbase); + if (generator_checktxn(ckp, cb, &val)) { + char *s = json_dumps(val, JSON_NO_UTF8 | JSON_COMPACT); + + json_decref(val); + LOGNOTICE("Coinbase transaction confirmed valid"); + LOGDEBUG("%s", s); + free(s); + } else { + /* This is a fatal error */ + LOGEMERG("Coinbase failed valid transaction check, aborting!"); + exit(1); + } + free(cb); + ckp->coinbase_valid = true; + LOGWARNING("Mining from any incoming username to address %s", ckp->btcaddress); + if (ckp->donation) + LOGWARNING("%.1f percent donation to %s", ckp->donation, ckp->donaddress); + } + } else if (unlikely(!ckp->coinbase_valid)) { + /* Create a sample coinbase to test its validity in solo mode */ + int coinbase_len, offset = 0; + char *coinbase, *cb; + json_t *val = NULL; + + coinbase_len = wb->coinb1len + ckp->nonce1length + ckp->nonce2length + wb->coinb2len + + sdata->txnlen + wb->coinb3len + 1; + coinbase = ckzalloc(coinbase_len); + memcpy(coinbase, wb->coinb1bin, wb->coinb1len); + offset += wb->coinb1len; + offset += ckp->nonce1length + ckp->nonce2length; + memcpy(coinbase + offset, wb->coinb2bin, wb->coinb2len); + offset += wb->coinb2len; + coinbase[offset] = sdata->txnlen; + offset += 1; + memcpy(coinbase + offset, sdata->txnbin, sdata->txnlen); + offset += sdata->txnlen; + memcpy(coinbase + offset, wb->coinb3bin, wb->coinb3len); + offset += wb->coinb3len; + cb = bin2hex(coinbase, offset); + LOGDEBUG("Coinbase txn %s", cb); + free(coinbase); + if (generator_checktxn(ckp, cb, &val)) { + char *s = json_dumps(val, JSON_NO_UTF8 | JSON_COMPACT); + + json_decref(val); + LOGNOTICE("Coinbase transaction confirmed valid"); + LOGDEBUG("%s", s); + free(s); + } else { + /* This is a fatal error */ + LOGEMERG("Coinbase failed valid transaction check, aborting!"); + exit(1); + } + free(cb); + ckp->coinbase_valid = true; + LOGWARNING("Mining solo to any incoming 
valid BTC address username"); + if (ckp->donation) + LOGWARNING("%.1f percent donation to %s", ckp->donation, ckp->donaddress); + } + + /* Set this just for node compatibility, though it's unused */ + wb->coinb2 = bin2hex(wb->coinb2bin, wb->coinb2len); + LOGDEBUG("Coinb2: %s", wb->coinb2); + /* Coinbases 2 +/- 3 templates complete */ + + snprintf(header, 270, "%s%s%s%s%s%s%s", + wb->bbversion, wb->prevhash, + "0000000000000000000000000000000000000000000000000000000000000000", + wb->ntime, wb->nbit, + "00000000", /* nonce */ + workpadding); + header[224] = 0; + LOGDEBUG("Header: %s", header); + hex2bin(wb->headerbin, header, 112); +} + +static void stratum_broadcast_update(sdata_t *sdata, const workbase_t *wb, bool clean); +static void stratum_broadcast_updates(sdata_t *sdata, bool clean); + +static void clear_userwb(sdata_t *sdata, int64_t id) +{ + user_instance_t *instance, *tmp; + + ck_wlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->user_instances, instance, tmp) { + struct userwb *userwb; + + HASH_FIND_I64(instance->userwbs, &id, userwb); + if (!userwb) + continue; + HASH_DEL(instance->userwbs, userwb); + free(userwb->coinb2bin); + free(userwb->coinb2); + free(userwb); + } + ck_wunlock(&sdata->instance_lock); +} + +static void clear_workbase(ckpool_t *ckp, workbase_t *wb) +{ + if (ckp->btcsolo) + clear_userwb(ckp->sdata, wb->id); + free(wb->flags); + free(wb->txn_data); + free(wb->txn_hashes); + free(wb->logdir); + free(wb->coinb1bin); + free(wb->coinb1); + free(wb->coinb2bin); + free(wb->coinb2); + free(wb->coinb3bin); + json_decref(wb->merkle_array); + if (wb->json) + json_decref(wb->json); + free(wb); +} + +/* Remove all shares with a workbase id less than wb_id for block changes */ +static void purge_share_hashtable(sdata_t *sdata, const int64_t wb_id) +{ + share_t *share, *tmp; + int purged = 0; + + mutex_lock(&sdata->share_lock); + HASH_ITER(hh, sdata->shares, share, tmp) { + if (share->workbase_id < wb_id) { + HASH_DEL(sdata->shares, share); + 
dealloc(share); + purged++; + } + } + mutex_unlock(&sdata->share_lock); + + if (purged) + LOGINFO("Cleared %d shares from share hashtable", purged); +} + +/* Remove all shares with a workbase id == wb_id being discarded */ +static void age_share_hashtable(sdata_t *sdata, const int64_t wb_id) +{ + share_t *share, *tmp; + int aged = 0; + + mutex_lock(&sdata->share_lock); + HASH_ITER(hh, sdata->shares, share, tmp) { + if (share->workbase_id == wb_id) { + HASH_DEL(sdata->shares, share); + dealloc(share); + aged++; + } + } + mutex_unlock(&sdata->share_lock); + + if (aged) + LOGINFO("Aged %d shares from share hashtable", aged); +} + +/* Append a bulk list already created to the ssends list */ +static void ssend_bulk_append(sdata_t *sdata, ckmsg_t *bulk_send, const int messages) +{ + ckmsgq_t *ssends = sdata->ssends; + + mutex_lock(ssends->lock); + ssends->messages += messages; + DL_CONCAT(ssends->msgs, bulk_send); + pthread_cond_signal(ssends->cond); + mutex_unlock(ssends->lock); +} + +/* As ssend_bulk_append but for high priority messages to be put at the front + * of the list. 
*/ +static void ssend_bulk_prepend(sdata_t *sdata, ckmsg_t *bulk_send, const int messages) +{ + ckmsgq_t *ssends = sdata->ssends; + ckmsg_t *tmp; + + mutex_lock(ssends->lock); + tmp = ssends->msgs; + ssends->msgs = bulk_send; + ssends->messages += messages; + DL_CONCAT(ssends->msgs, tmp); + pthread_cond_signal(ssends->cond); + mutex_unlock(ssends->lock); +} + +/* Send a json msg to an upstream trusted remote server */ +static void upstream_json(ckpool_t *ckp, json_t *val) +{ + char *msg; + + msg = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); + /* Connector absorbs and frees msg */ + connector_upstream_msg(ckp, msg); +} + +/* Upstream a json msgtype */ +static void upstream_json_msgtype(ckpool_t *ckp, json_t *val, const int msg_type) +{ + json_set_string(val, "method", stratum_msgs[msg_type]); + upstream_json(ckp, val); +} + +/* Upstream a json msgtype, duplicating the json */ +static void upstream_msgtype(ckpool_t *ckp, const json_t *val, const int msg_type) +{ + json_t *json_msg = json_deep_copy(val); + + json_set_string(json_msg, "method", stratum_msgs[msg_type]); + upstream_json(ckp, json_msg); + json_decref(json_msg); +} + +static void send_node_workinfo(ckpool_t *ckp, sdata_t *sdata, const workbase_t *wb) +{ + stratum_instance_t *client; + ckmsg_t *bulk_send = NULL; + int messages = 0; + json_t *wb_val; + + wb_val = json_object(); + + json_set_int(wb_val, "jobid", wb->mapped_id); + json_set_string(wb_val, "target", wb->target); + json_set_double(wb_val, "diff", wb->diff); + json_set_int(wb_val, "version", wb->version); + json_set_int(wb_val, "curtime", wb->curtime); + json_set_string(wb_val, "prevhash", wb->prevhash); + json_set_string(wb_val, "ntime", wb->ntime); + json_set_string(wb_val, "bbversion", wb->bbversion); + json_set_string(wb_val, "nbit", wb->nbit); + json_set_int(wb_val, "coinbasevalue", wb->coinbasevalue); + json_set_int(wb_val, "height", wb->height); + json_set_string(wb_val, "flags", wb->flags); + 
json_set_int(wb_val, "txns", wb->txns); + json_set_string(wb_val, "txn_hashes", wb->txn_hashes); + json_set_int(wb_val, "merkles", wb->merkles); + json_object_set_new_nocheck(wb_val, "merklehash", json_deep_copy(wb->merkle_array)); + json_set_string(wb_val, "coinb1", wb->coinb1); + json_set_int(wb_val, "enonce1varlen", wb->enonce1varlen); + json_set_int(wb_val, "enonce2varlen", wb->enonce2varlen); + json_set_int(wb_val, "coinb1len", wb->coinb1len); + json_set_int(wb_val, "coinb2len", wb->coinb2len); + json_set_string(wb_val, "coinb2", wb->coinb2); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(sdata->node_instances, client, node_next) { + ckmsg_t *client_msg; + smsg_t *msg; + json_t *json_msg = json_deep_copy(wb_val); + + json_set_string(json_msg, "node.method", stratum_msgs[SM_WORKINFO]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + DL_FOREACH2(sdata->remote_instances, client, remote_next) { + ckmsg_t *client_msg; + smsg_t *msg; + json_t *json_msg = json_deep_copy(wb_val); + + json_set_string(json_msg, "method", stratum_msgs[SM_WORKINFO]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&sdata->instance_lock); + + if (ckp->remote) + upstream_msgtype(ckp, wb_val, SM_WORKINFO); + + json_decref(wb_val); + + if (bulk_send) { + LOGINFO("Sending workinfo to mining nodes"); + ssend_bulk_append(sdata, bulk_send, messages); + } +} + +static json_t *generate_workinfo(ckpool_t *ckp, const workbase_t *wb, const char *func) +{ + char cdfield[64]; + json_t *val; + + sprintf(cdfield, "%lu,%lu", wb->gentime.tv_sec, wb->gentime.tv_nsec); + + JSON_CPACK(val, "{sI,ss,ss,ss,ss,ss,ss,ss,ss,sI,so,ss,ss,ss,ss}", + "workinfoid", wb->id, + 
"poolinstance", ckp->name, + "transactiontree", wb->txn_hashes, + "prevhash", wb->prevhash, + "coinbase1", wb->coinb1, + "coinbase2", wb->coinb2, + "version", wb->bbversion, + "ntime", wb->ntime, + "bits", wb->nbit, + "reward", wb->coinbasevalue, + "merklehash", json_deep_copy(wb->merkle_array), + "createdate", cdfield, + "createby", "code", + "createcode", func, + "createinet", ckp->serverurl[0]); + return val; +} + +static void send_workinfo(ckpool_t *ckp, sdata_t *sdata, const workbase_t *wb) +{ + if (!ckp->proxy) + send_node_workinfo(ckp, sdata, wb); +} + +/* Entered with instance_lock held, make sure wb can't be pulled from us */ +static void __generate_userwb(sdata_t *sdata, workbase_t *wb, user_instance_t *user) +{ + struct userwb *userwb; + int64_t id = wb->id; + + /* Make sure this user doesn't have this userwb already */ + HASH_FIND_I64(user->userwbs, &id, userwb); + if (unlikely(userwb)) + return; + + sdata->userwbs_generated++; + userwb = ckzalloc(sizeof(struct userwb)); + userwb->id = id; + userwb->coinb2bin = ckalloc(wb->coinb2len + 1 + user->txnlen + wb->coinb3len); + memcpy(userwb->coinb2bin, wb->coinb2bin, wb->coinb2len); + userwb->coinb2len = wb->coinb2len; + userwb->coinb2bin[userwb->coinb2len++] = user->txnlen; + memcpy(userwb->coinb2bin + userwb->coinb2len, user->txnbin, user->txnlen); + userwb->coinb2len += user->txnlen; + memcpy(userwb->coinb2bin + userwb->coinb2len, wb->coinb3bin, wb->coinb3len); + userwb->coinb2len += wb->coinb3len; + userwb->coinb2 = bin2hex(userwb->coinb2bin, userwb->coinb2len); + HASH_ADD_I64(user->userwbs, id, userwb); +} + +static void generate_userwbs(sdata_t *sdata, workbase_t *wb) +{ + user_instance_t *instance, *tmp; + + ck_wlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->user_instances, instance, tmp) { + if (!instance->btcaddress) + continue; + __generate_userwb(sdata, wb, instance); + } + ck_wunlock(&sdata->instance_lock); +} + +/* Add a new workbase to the table of workbases. 
Sdata is the global data in + * pool mode but unique to each subproxy in proxy mode */ +static void add_base(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb, bool *new_block) +{ + sdata_t *ckp_sdata = ckp->sdata; + pool_stats_t *stats = &sdata->stats; + double old_diff = stats->network_diff; + workbase_t *tmp, *tmpa; + int len, ret; + + ts_realtime(&wb->gentime); + /* Stats network_diff is not protected by lock but is not a critical + * value */ + wb->network_diff = diff_from_nbits(wb->headerbin + 72); + if (wb->network_diff < 1) + wb->network_diff = 1; + stats->network_diff = wb->network_diff; + if (stats->network_diff != old_diff) + LOGWARNING("Network diff set to %.1f", stats->network_diff); + len = strlen(ckp->logdir) + 8 + 1 + 16 + 1; + wb->logdir = ckzalloc(len); + + /* In proxy mode, the wb->id is received in the notify update and + * we set workbase_id from it. In server mode the stratifier is + * setting the workbase_id */ + ck_wlock(&sdata->workbase_lock); + ckp_sdata->workbases_generated++; + if (!ckp->proxy) + wb->mapped_id = wb->id = sdata->workbase_id++; + else + sdata->workbase_id = wb->id; + if (strncmp(wb->prevhash, sdata->lasthash, 64)) { + char bin[32], swap[32]; + + *new_block = true; + memcpy(sdata->lasthash, wb->prevhash, 65); + hex2bin(bin, sdata->lasthash, 32); + swap_256(swap, bin); + __bin2hex(sdata->lastswaphash, swap, 32); + sdata->blockchange_id = wb->id; + } + if (*new_block && ckp->logshares) { + sprintf(wb->logdir, "%s%08x/", ckp->logdir, wb->height); + ret = mkdir(wb->logdir, 0750); + if (unlikely(ret && errno != EEXIST)) + LOGERR("Failed to create log directory %s", wb->logdir); + } + sprintf(wb->idstring, "%016lx", wb->id); + if (ckp->logshares) + sprintf(wb->logdir, "%s%08x/%s", ckp->logdir, wb->height, wb->idstring); + + HASH_ADD_I64(sdata->workbases, id, wb); + if (sdata->current_workbase) + tv_time(&sdata->current_workbase->retired); + sdata->current_workbase = wb; + + /* Is this long enough to ensure we don't dereference a 
workbase + * immediately? Should be unless clock changes 10 minutes so we use + * ts_realtime */ + HASH_ITER(hh, sdata->workbases, tmp, tmpa) { + if (HASH_COUNT(sdata->workbases) < 3) + break; + if (wb == tmp) + continue; + if (tmp->readcount) + continue; + /* Age old workbases older than 10 minutes old */ + if (tmp->gentime.tv_sec < wb->gentime.tv_sec - 600) { + HASH_DEL(sdata->workbases, tmp); + ck_wunlock(&sdata->workbase_lock); + + /* Drop lock to avoid recursive locks */ + age_share_hashtable(sdata, tmp->id); + clear_workbase(ckp, tmp); + + ck_wlock(&sdata->workbase_lock); + } + } + ck_wunlock(&sdata->workbase_lock); + + /* This wb can't be pulled out from under us so no workbase lock is + * required to generate_userwbs */ + if (ckp->btcsolo) + generate_userwbs(sdata, wb); + + if (*new_block) + purge_share_hashtable(sdata, wb->id); + + if (!ckp->passthrough) + send_workinfo(ckp, sdata, wb); +} + +static void broadcast_ping(sdata_t *sdata); + +#define REFCOUNT_REMOTE 20 +#define REFCOUNT_LOCAL 10 +#define REFCOUNT_RETURNED 5 + +/* Submit the transactions in node/remote mode so the local btcd has all the + * transactions that will go into the next blocksolve. */ +static void submit_transaction(ckpool_t *ckp, const char *hash) +{ + char *buf; + + if (unlikely(!ckp->generator_ready)) + return; + ASPRINTF(&buf, "submittxn:%s", hash); + send_proc(ckp->generator,buf); + free(buf); +} + +/* Build a hashlist of all transactions, allowing us to compare with the list of + * existing transactions to determine which need to be propagated */ +static bool add_txn(ckpool_t *ckp, sdata_t *sdata, txntable_t **txns, const char *hash, + const char *data, bool local) +{ + bool found = false; + txntable_t *txn; + + /* Look for transactions we already know about and increment their + * refcount if we're still using them. 
*/ + ck_wlock(&sdata->txn_lock); + HASH_FIND_STR(sdata->txns, hash, txn); + if (txn) { + /* If we already have this in our transaction table but haven't + * seen it in a while, it is reappearing in work and we should + * propagate it again in update_txns. */ + if (txn->refcount > REFCOUNT_RETURNED) + found = true; + if (!local) + txn->refcount = REFCOUNT_REMOTE; + else if (txn->refcount < REFCOUNT_LOCAL) + txn->refcount = REFCOUNT_LOCAL; + txn->seen = true; + } + ck_wunlock(&sdata->txn_lock); + + if (found) + return false; + + txn = ckzalloc(sizeof(txntable_t)); + memcpy(txn->hash, hash, 65); + if (local) + txn->data = strdup(data); + else { + /* Get the data from our local bitcoind as a way of confirming it + * already knows about this transaction. */ + txn->data = generator_get_txn(ckp, hash); + if (!txn->data) { + /* If our local bitcoind hasn't seen this transaction, + * submit it for mempools to be ~synchronised */ + submit_transaction(ckp, data); + txn->data = strdup(data); + } + } + + txn->seen = true; + if (!local || ckp->node) + txn->refcount = REFCOUNT_REMOTE; + else + txn->refcount = REFCOUNT_LOCAL; + HASH_ADD_STR(*txns, hash, txn); + + return true; +} + +static void send_node_transactions(ckpool_t *ckp, sdata_t *sdata, const json_t *txn_val) +{ + stratum_instance_t *client; + ckmsg_t *bulk_send = NULL; + ckmsg_t *client_msg; + int messages = 0; + json_t *json_msg; + smsg_t *msg; + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(sdata->node_instances, client, node_next) { + json_msg = json_deep_copy(txn_val); + json_set_string(json_msg, "node.method", stratum_msgs[SM_TRANSACTIONS]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + DL_FOREACH2(sdata->remote_instances, client, remote_next) { + json_msg = json_deep_copy(txn_val); + json_set_string(json_msg, "method", 
stratum_msgs[SM_TRANSACTIONS]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&sdata->instance_lock); + + if (ckp->remote) + upstream_msgtype(ckp, txn_val, SM_TRANSACTIONS); + + if (bulk_send) { + LOGINFO("Sending transactions to mining nodes"); + ssend_bulk_append(sdata, bulk_send, messages); + } +} + +static void submit_transaction_array(ckpool_t *ckp, const json_t *arr) +{ + json_t *arr_val; + size_t index; + + json_array_foreach(arr, index, arr_val) { + submit_transaction(ckp, json_string_value(arr_val)); + } +} + +static void clear_txn(txntable_t *txn) +{ + free(txn->data); + free(txn); +} + +static void update_txns(ckpool_t *ckp, sdata_t *sdata, txntable_t *txns, bool local) +{ + json_t *val, *txn_array = json_array(), *purged_txns = json_array(); + int added = 0, purged = 0; + txntable_t *tmp, *tmpa; + + /* Find which transactions have their refcount decremented to zero + * and remove them. */ + ck_wlock(&sdata->txn_lock); + HASH_ITER(hh, sdata->txns, tmp, tmpa) { + json_t *txn_val; + + if (tmp->seen) { + tmp->seen = false; + continue; + } + if (tmp->refcount-- > 0) + continue; + HASH_DEL(sdata->txns, tmp); + txn_val = json_string(tmp->data); + json_array_append_new(purged_txns, txn_val); + clear_txn(tmp); + purged++; + } + /* Add the new transactions to the transaction table */ + HASH_ITER(hh, txns, tmp, tmpa) { + txntable_t *found; + json_t *txn_val; + + HASH_DEL(txns, tmp); + /* Propagate transaction here */ + JSON_CPACK(txn_val, "{ss,ss}", "hash", tmp->hash, "data", tmp->data); + json_array_append_new(txn_array, txn_val); + + /* Check one last time this txn hasn't already been added in the + * interim. This can happen in add_txn intentionally for a + * transaction that has reappeared. 
*/ + HASH_FIND_STR(sdata->txns, tmp->hash, found); + if (found) { + clear_txn(tmp); + continue; + } + + /* Move to the sdata transaction table */ + HASH_ADD_STR(sdata->txns, hash, tmp); + sdata->txns_generated++; + added++; + } + ck_wunlock(&sdata->txn_lock); + + if (added) { + JSON_CPACK(val, "{so}", "transaction", txn_array); + send_node_transactions(ckp, sdata, val); + json_decref(val); + } else + json_decref(txn_array); + + /* Submit transactions to bitcoind again when we're purging them in + * case they've been removed from its mempool as well and we need them + * again in the future for a remote workinfo that hasn't forgotten + * about them. */ + if (purged && ckp->nodeservers) + submit_transaction_array(ckp, purged_txns); + json_decref(purged_txns); + + if (added || purged) { + LOGINFO("Stratifier added %d %stransactions and purged %d", added, + local ? "" : "remote ", purged); + } +} + +/* Distill down a set of transactions into an efficient tree arrangement for + * stratum messages and fast work assembly. 
*/ +static txntable_t *wb_merkle_bin_txns(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb, + json_t *txn_array, bool local) +{ + int i, j, binleft, binlen; + txntable_t *txns = NULL; + json_t *arr_val; + uchar *hashbin; + + wb->txns = json_array_size(txn_array); + wb->merkles = 0; + binlen = wb->txns * 32 + 32; + hashbin = alloca(binlen + 32); + memset(hashbin, 0, 32); + binleft = binlen / 32; + if (wb->txns) { + int len = 1, ofs = 0; + const char *txn; + + for (i = 0; i < wb->txns; i++) { + arr_val = json_array_get(txn_array, i); + txn = json_string_value(json_object_get(arr_val, "data")); + if (!txn) { + LOGWARNING("json_string_value fail - cannot find transaction data"); + goto out; + } + len += strlen(txn); + } + + wb->txn_data = ckzalloc(len + 1); + wb->txn_hashes = ckzalloc(wb->txns * 65 + 1); + memset(wb->txn_hashes, 0x20, wb->txns * 65); // Spaces + + for (i = 0; i < wb->txns; i++) { + const char *txid, *hash; + char binswap[32]; + + arr_val = json_array_get(txn_array, i); + + // Post-segwit, txid returns the tx hash without witness data + txid = json_string_value(json_object_get(arr_val, "txid")); + hash = json_string_value(json_object_get(arr_val, "hash")); + if (!txid) + txid = hash; + if (unlikely(!txid)) { + LOGERR("Missing txid for transaction in wb_merkle_bins"); + goto out; + } + txn = json_string_value(json_object_get(arr_val, "data")); + add_txn(ckp, sdata, &txns, hash, txn, local); + len = strlen(txn); + memcpy(wb->txn_data + ofs, txn, len); + ofs += len; + if (!hex2bin(binswap, txid, 32)) { + LOGERR("Failed to hex2bin hash in gbt_merkle_bins"); + goto out; + } + memcpy(wb->txn_hashes + i * 65, txid, 64); + bswap_256(hashbin + 32 + 32 * i, binswap); + } + } else + wb->txn_hashes = ckzalloc(1); + wb->merkle_array = json_array(); + if (binleft > 1) { + while (42) { + if (binleft == 1) + break; + memcpy(&wb->merklebin[wb->merkles][0], hashbin + 32, 32); + __bin2hex(&wb->merklehash[wb->merkles][0], &wb->merklebin[wb->merkles][0], 32); + 
json_array_append_new(wb->merkle_array, json_string(&wb->merklehash[wb->merkles][0])); + LOGDEBUG("MerkleHash %d %s",wb->merkles, &wb->merklehash[wb->merkles][0]); + wb->merkles++; + if (binleft % 2) { + memcpy(hashbin + binlen, hashbin + binlen - 32, 32); + binlen += 32; + binleft++; + } + for (i = 32, j = 64; j < binlen; i += 32, j += 64) + gen_hash(hashbin + j, hashbin + i, 64); + binleft /= 2; + binlen = binleft * 32; + } + } + LOGNOTICE("Stored %s workbase with %d transactions", local ? "local" : "remote", + wb->txns); +out: + return txns; +} + +static const unsigned char witness_nonce[32] = {0}; +static const int witness_nonce_size = sizeof(witness_nonce); +static const unsigned char witness_header[] = {0xaa, 0x21, 0xa9, 0xed}; +static const int witness_header_size = sizeof(witness_header); + +static void gbt_witness_data(workbase_t *wb, json_t *txn_array) +{ + int i, binlen, txncount = json_array_size(txn_array); + const char* hash; + json_t *arr_val; + uchar *hashbin; + + binlen = txncount * 32 + 32; + hashbin = alloca(binlen + 32); + memset(hashbin, 0, 32); + + for (i = 0; i < txncount; i++) { + char binswap[32]; + + arr_val = json_array_get(txn_array, i); + hash = json_string_value(json_object_get(arr_val, "hash")); + if (unlikely(!hash)) { + LOGERR("Hash missing for transaction"); + return; + } + if (!hex2bin(binswap, hash, 32)) { + LOGERR("Failed to hex2bin hash in gbt_witness_data"); + return; + } + bswap_256(hashbin + 32 + 32 * i, binswap); + } + + // Build merkle root (copied from libblkmaker) + for (txncount++ ; txncount > 1 ; txncount /= 2) { + if (txncount % 2) { + // Odd number, duplicate the last + memcpy(hashbin + 32 * txncount, hashbin + 32 * (txncount - 1), 32); + txncount++; + } + for (i = 0; i < txncount; i += 2) { + // We overlap input and output here, on the first pair + gen_hash(hashbin + 32 * i, hashbin + 32 * (i / 2), 64); + } + } + + memcpy(hashbin + 32, &witness_nonce, witness_nonce_size); + gen_hash(hashbin, hashbin + 
witness_header_size, 32 + witness_nonce_size); + memcpy(hashbin, witness_header, witness_header_size); + __bin2hex(wb->witnessdata, hashbin, 32 + witness_header_size); + wb->insert_witness = true; +} + +/* This function assumes it will only receive a valid json gbt base template + * since checking should have been done earlier, and creates the base template + * for generating work templates. This is a ckmsgq so all uses of this function + * are serialised. */ +static void block_update(ckpool_t *ckp, int *prio) +{ + bool new_block = false, ret = false; + const char *witnessdata_check; + sdata_t *sdata = ckp->sdata; + json_t *txn_array; + txntable_t *txns; + int retries = 0; + workbase_t *wb; + +retry: + wb = generator_getbase(ckp); + if (unlikely(!wb)) { + if (retries++ < 5 || *prio == GEN_PRIORITY) { + LOGWARNING("Generator returned failure in update_base, retry #%d", retries); + goto retry; + } + LOGWARNING("Generator failed in update_base after retrying"); + goto out; + } + if (unlikely(retries)) + LOGWARNING("Generator succeeded in update_base after retrying"); + + wb->ckp = ckp; + + txn_array = json_object_get(wb->json, "transactions"); + txns = wb_merkle_bin_txns(ckp, sdata, wb, txn_array, true); + + wb->insert_witness = false; + + witnessdata_check = json_string_value(json_object_get(wb->json, "default_witness_commitment")); + if (likely(witnessdata_check)) { + LOGDEBUG("Default witness commitment present, adding witness data"); + gbt_witness_data(wb, txn_array); + // Verify against the pre-calculated value if it exists. Skip the size/OP_RETURN bytes. + if (wb->insert_witness && safecmp(witnessdata_check + 4, wb->witnessdata) != 0) + LOGERR("Witness from btcd: %s. 
Calculated Witness: %s", witnessdata_check + 4, wb->witnessdata); + } + + generate_coinbase(ckp, wb); + + add_base(ckp, sdata, wb, &new_block); + + if (new_block) + LOGNOTICE("Block hash changed to %s", sdata->lastswaphash); + if (ckp->btcsolo) + stratum_broadcast_updates(sdata, new_block); + else + stratum_broadcast_update(sdata, wb, new_block); + ret = true; + LOGINFO("Broadcast updated stratum base"); + /* Update transactions after stratum broadcast to not delay + * propagation. */ + if (likely(txns)) + update_txns(ckp, sdata, txns, true); + /* Reset the update time to avoid stacked low priority notifies. Bring + * forward the next notify in case of a new block. */ + sdata->update_time = time(NULL); + if (new_block) + sdata->update_time -= ckp->update_interval / 2; +out: + + cksem_post(&sdata->update_sem); + + /* Send a ping to miners if we fail to get a base to keep them + * connected while bitcoind recovers(?) */ + if (unlikely(!ret)) { + LOGINFO("Broadcast ping due to failed stratum base update"); + broadcast_ping(sdata); + } + free(prio); +} + +#define SSEND_PREPEND 0 +#define SSEND_APPEND 1 + +/* Downstream a json message to all remote servers except for the one matching + * client_id */ +static void downstream_json(sdata_t *sdata, const json_t *val, const int64_t client_id, + const int prio) +{ + stratum_instance_t *client; + ckmsg_t *bulk_send = NULL; + int messages = 0; + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(sdata->remote_instances, client, remote_next) { + ckmsg_t *client_msg; + json_t *json_msg; + smsg_t *msg; + + /* Don't send remote workinfo back to same remote */ + if (client->id == client_id) + continue; + json_msg = json_deep_copy(val); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&sdata->instance_lock); + + if (bulk_send) { + LOGINFO("Sending json to %d 
remote servers", messages); + switch (prio) { + case SSEND_PREPEND: + ssend_bulk_prepend(sdata, bulk_send, messages); + break; + case SSEND_APPEND: + ssend_bulk_append(sdata, bulk_send, messages); + break; + } + } +} + +/* Find any transactions that are missing from our transaction table during + * rebuild_txns by requesting their data from another server. */ +static void request_txns(ckpool_t *ckp, sdata_t *sdata, json_t *txns) +{ + json_t *val; + + JSON_CPACK(val, "{so}", "hash", txns); + if (ckp->remote) + upstream_msgtype(ckp, val, SM_REQTXNS); + else if (ckp->node) { + /* Nodes have no way to signal upstream pool yet */ + } else { + /* We don't know which remote sent the transaction hash so ask + * all of them for it */ + json_set_string(val, "method", stratum_msgs[SM_REQTXNS]); + downstream_json(sdata, val, 0, SSEND_APPEND); + } +} + +/* Rebuilds transactions from txnhashes to be able to construct wb_merkle_bins + * on remote workbases */ +static bool rebuild_txns(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb) +{ + const char *hashes = wb->txn_hashes; + json_t *txn_array, *missing_txns; + char hash[68] = {}; + bool ret = false; + txntable_t *txns; + int i, len = 0; + + /* We'll only see this on testnet now */ + if (unlikely(!wb->txns)) { + ret = true; + goto out; + } + if (likely(hashes)) + len = strlen(hashes); + if (!hashes || !len) + goto out; + + if (unlikely(len < wb->txns * 65)) { + LOGERR("Truncated transactions in rebuild_txns only %d long", len); + goto out; + } + ret = true; + txn_array = json_array(); + missing_txns = json_array(); + + for (i = 0; i < wb->txns; i++) { + json_t *txn_val = NULL; + txntable_t *txn; + char *data; + + memcpy(hash, hashes + i * 65, 64); + + ck_wlock(&sdata->txn_lock); + HASH_FIND_STR(sdata->txns, hash, txn); + if (likely(txn)) { + txn->refcount = REFCOUNT_REMOTE; + txn->seen = true; + JSON_CPACK(txn_val, "{ss,ss}", + "hash", hash, "data", txn->data); + json_array_append_new(txn_array, txn_val); + } + 
ck_wunlock(&sdata->txn_lock); + + if (likely(txn_val)) + continue; + /* See if we can find it in our local bitcoind */ + data = generator_get_txn(ckp, hash); + if (!data) { + txn_val = json_string(hash); + json_array_append_new(missing_txns, txn_val); + ret = false; + continue; + } + + /* We've found it, let's add it to the table */ + ck_wlock(&sdata->txn_lock); + /* One last check in case it got added while we dropped the lock */ + HASH_FIND_STR(sdata->txns, hash, txn); + if (likely(!txn)) { + txn = ckzalloc(sizeof(txntable_t)); + memcpy(txn->hash, hash, 65); + txn->data = data; + HASH_ADD_STR(sdata->txns, hash, txn); + sdata->txns_generated++; + } else { + free(data); + } + txn->refcount = REFCOUNT_REMOTE; + txn->seen = true; + JSON_CPACK(txn_val, "{ss,ss}", + "hash", hash, "data", txn->data); + json_array_append_new(txn_array, txn_val); + ck_wunlock(&sdata->txn_lock); + } + + if (ret) { + wb->incomplete = false; + LOGINFO("Rebuilt txns into workbase with %d transactions", i); + /* These two structures are regenerated so free their ram */ + json_decref(wb->merkle_array); + dealloc(wb->txn_hashes); + txns = wb_merkle_bin_txns(ckp, sdata, wb, txn_array, false); + if (likely(txns)) + update_txns(ckp, sdata, txns, false); + } else { + if (!sdata->wbincomplete) { + sdata->wbincomplete = true; + if (ckp->proxy) + LOGWARNING("Unable to rebuild transactions to create workinfo, ignore displayed hashrate"); + } + LOGINFO("Failed to find all txns in rebuild_txns"); + request_txns(ckp, sdata, missing_txns); + } + + json_decref(txn_array); + json_decref(missing_txns); +out: + return ret; +} + +/* Remote workbases are keyed by the combined values of wb->id and + * wb->client_id to prevent collisions in the unlikely event two remote + * servers are generating the same workbase ids. 
*/ +static void __add_to_remote_workbases(sdata_t *sdata, workbase_t *wb) +{ + HASH_ADD(hh, sdata->remote_workbases, id, sizeof(int64_t) * 2, wb); +} + +static void add_remote_base(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb) +{ + stratum_instance_t *client; + ckmsg_t *bulk_send = NULL; + workbase_t *tmp, *tmpa; + int messages = 0; + int64_t skip; + json_t *val; + + ts_realtime(&wb->gentime); + + ck_wlock(&sdata->workbase_lock); + sdata->workbases_generated++; + wb->mapped_id = sdata->workbase_id++; + HASH_ITER(hh, sdata->remote_workbases, tmp, tmpa) { + if (HASH_COUNT(sdata->remote_workbases) < 3) + break; + if (wb == tmp) + continue; + if (tmp->readcount) + continue; + /* Age old workbases older than 10 minutes old */ + if (tmp->gentime.tv_sec < wb->gentime.tv_sec - 600) { + HASH_DEL(sdata->remote_workbases, tmp); + ck_wunlock(&sdata->workbase_lock); + + clear_workbase(ckp, tmp); + + ck_wlock(&sdata->workbase_lock); + } + } + __add_to_remote_workbases(sdata, wb); + ck_wunlock(&sdata->workbase_lock); + + val = generate_workinfo(ckp, wb, __func__); + + /* Set jobid with mapped id for other nodes and remotes */ + json_set_int64(val, "jobid", wb->mapped_id); + + /* Replace workinfoid to mapped id */ + json_set_int64(val, "workinfoid", wb->mapped_id); + + /* Strip unnecessary fields and add extra fields needed */ + json_set_int(val, "txns", wb->txns); + json_set_string(val, "txn_hashes", wb->txn_hashes); + json_set_int(val, "merkles", wb->merkles); + + skip = subclient(wb->client_id); + + /* Send a copy of this to all OTHER remote trusted servers as well */ + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(sdata->remote_instances, client, remote_next) { + ckmsg_t *client_msg; + json_t *json_msg; + smsg_t *msg; + + /* Don't send remote workinfo back to the source remote */ + if (client->id == wb->client_id) + continue; + json_msg = json_deep_copy(val); + json_set_string(json_msg, "method", stratum_msgs[SM_WORKINFO]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = 
ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + DL_FOREACH2(sdata->node_instances, client, node_next) { + ckmsg_t *client_msg; + json_t *json_msg; + smsg_t *msg; + + /* Don't send node workinfo back to the source node */ + if (client->id == skip) + continue; + json_msg = json_deep_copy(val); + json_set_string(json_msg, "node.method", stratum_msgs[SM_WORKINFO]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&sdata->instance_lock); + + json_decref(val); + + if (bulk_send) { + LOGINFO("Sending remote workinfo to %d other remote servers", messages); + ssend_bulk_append(sdata, bulk_send, messages); + } +} + +static void add_node_base(ckpool_t *ckp, json_t *val, bool trusted, int64_t client_id) +{ + workbase_t *wb = ckzalloc(sizeof(workbase_t)); + sdata_t *sdata = ckp->sdata; + bool new_block = false; + char header[272]; + + wb->ckp = ckp; + /* This is the client id if this workbase came from a remote trusted + * server. 
*/ + wb->client_id = client_id; + + /* Some of these fields are empty when running as a remote trusted + * server receiving other workinfos from the upstream pool */ + json_int64cpy(&wb->id, val, "jobid"); + json_strcpy(wb->target, val, "target"); + json_dblcpy(&wb->diff, val, "diff"); + json_uintcpy(&wb->version, val, "version"); + json_uintcpy(&wb->curtime, val, "curtime"); + json_strcpy(wb->prevhash, val, "prevhash"); + json_strcpy(wb->ntime, val, "ntime"); + sscanf(wb->ntime, "%x", &wb->ntime32); + json_strcpy(wb->bbversion, val, "bbversion"); + json_strcpy(wb->nbit, val, "nbit"); + json_uint64cpy(&wb->coinbasevalue, val, "coinbasevalue"); + json_intcpy(&wb->height, val, "height"); + json_strdup(&wb->flags, val, "flags"); + + json_intcpy(&wb->txns, val, "txns"); + json_strdup(&wb->txn_hashes, val, "txn_hashes"); + if (!ckp->proxy) { + /* This is a workbase from a trusted remote */ + wb->merkle_array = json_object_dup(val, "merklehash"); + json_intcpy(&wb->merkles, val, "merkles"); + if (!rebuild_txns(ckp, sdata, wb)) + wb->incomplete = true; + } else { + if (!rebuild_txns(ckp, sdata, wb)) { + clear_workbase(ckp, wb); + return; + } + } + json_strdup(&wb->coinb1, val, "coinb1"); + json_intcpy(&wb->coinb1len, val, "coinb1len"); + wb->coinb1bin = ckzalloc(wb->coinb1len); + hex2bin(wb->coinb1bin, wb->coinb1, wb->coinb1len); + json_strdup(&wb->coinb2, val, "coinb2"); + json_intcpy(&wb->coinb2len, val, "coinb2len"); + wb->coinb2bin = ckzalloc(wb->coinb2len); + hex2bin(wb->coinb2bin, wb->coinb2, wb->coinb2len); + json_intcpy(&wb->enonce1varlen, val, "enonce1varlen"); + json_intcpy(&wb->enonce2varlen, val, "enonce2varlen"); + ts_realtime(&wb->gentime); + + snprintf(header, 270, "%s%s%s%s%s%s%s", + wb->bbversion, wb->prevhash, + "0000000000000000000000000000000000000000000000000000000000000000", + wb->ntime, wb->nbit, + "00000000", /* nonce */ + workpadding); + header[224] = 0; + LOGDEBUG("Header: %s", header); + hex2bin(wb->headerbin, header, 112); + + /* If this is 
from a remote trusted server or an upstream server, add + * it to the remote_workbases hashtable */ + if (trusted) + add_remote_base(ckp, sdata, wb); + else + add_base(ckp, sdata, wb, &new_block); + + if (new_block) + LOGNOTICE("Block hash changed to %s", sdata->lastswaphash); +} + +/* Calculate share diff and fill in hash and swap. Need to hold workbase read count */ +static double +share_diff(char *coinbase, const uchar *enonce1bin, const workbase_t *wb, const char *nonce2, + const uint32_t ntime32, uint32_t version_mask, const char *nonce, + uchar *hash, uchar *swap, int *cblen) +{ + unsigned char merkle_root[32], merkle_sha[64]; + uint32_t *data32, *swap32, benonce32; + uchar hash1[32]; + char data[80]; + int i; + + memcpy(coinbase, wb->coinb1bin, wb->coinb1len); + *cblen = wb->coinb1len; + memcpy(coinbase + *cblen, enonce1bin, wb->enonce1constlen + wb->enonce1varlen); + *cblen += wb->enonce1constlen + wb->enonce1varlen; + hex2bin(coinbase + *cblen, nonce2, wb->enonce2varlen); + *cblen += wb->enonce2varlen; + memcpy(coinbase + *cblen, wb->coinb2bin, wb->coinb2len); + *cblen += wb->coinb2len; + + gen_hash((uchar *)coinbase, merkle_root, *cblen); + memcpy(merkle_sha, merkle_root, 32); + for (i = 0; i < wb->merkles; i++) { + memcpy(merkle_sha + 32, &wb->merklebin[i], 32); + gen_hash(merkle_sha, merkle_root, 64); + memcpy(merkle_sha, merkle_root, 32); + } + data32 = (uint32_t *)merkle_sha; + swap32 = (uint32_t *)merkle_root; + flip_32(swap32, data32); + + /* Copy the cached header binary and insert the merkle root */ + memcpy(data, wb->headerbin, 80); + memcpy(data + 36, merkle_root, 32); + + /* Update nVersion when version_mask is in use */ + if (version_mask) { + version_mask = htobe32(version_mask); + data32 = (uint32_t *)data; + *data32 |= version_mask; + } + + /* Insert the nonce value into the data */ + hex2bin(&benonce32, nonce, 4); + data32 = (uint32_t *)(data + 64 + 12); + *data32 = benonce32; + + /* Insert the ntime value into the data */ + data32 = 
(uint32_t *)(data + 68); + *data32 = htobe32(ntime32); + + /* Hash the share */ + data32 = (uint32_t *)data; + swap32 = (uint32_t *)swap; + flip_80(swap32, data32); + sha256(swap, 80, hash1); + sha256(hash1, 32, hash); + + /* Calculate the diff of the share here */ + return diff_from_target(hash); +} + +static void add_remote_blockdata(ckpool_t *ckp, json_t *val, const int cblen, const char *coinbase, + const uchar *data) +{ + char *buf; + + json_set_string(val, "name", ckp->name); + json_set_int(val, "cblen", cblen); + buf = bin2hex(coinbase, cblen); + json_set_string(val, "coinbasehex", buf); + free(buf); + buf = bin2hex(data, 80); + json_set_string(val, "swaphex", buf); + free(buf); +} + +/* Entered with workbase readcount, grabs instance_lock. client_id is where the + * block originated. */ +static void send_nodes_block(sdata_t *sdata, const json_t *block_val, const int64_t client_id) +{ + stratum_instance_t *client; + ckmsg_t *bulk_send = NULL; + int messages = 0; + int64_t skip; + + /* Don't send the block back to a remote node if that's where it was + * found. */ + skip = subclient(client_id); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(sdata->node_instances, client, node_next) { + ckmsg_t *client_msg; + json_t *json_msg; + smsg_t *msg; + + if (client->id == skip) + continue; + json_msg = json_deep_copy(block_val); + json_set_string(json_msg, "node.method", stratum_msgs[SM_BLOCK]); + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = json_msg; + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&sdata->instance_lock); + + if (bulk_send) { + LOGNOTICE("Sending block to %d mining nodes", messages); + ssend_bulk_prepend(sdata, bulk_send, messages); + } + +} + + +/* Entered with workbase readcount. 
*/ +static void send_node_block(ckpool_t *ckp, sdata_t *sdata, const char *enonce1, const char *nonce, + const char *nonce2, const uint32_t ntime32, const uint32_t version_mask, + const int64_t jobid, const double diff, const int64_t client_id, + const char *coinbase, const int cblen, const uchar *data) +{ + if (sdata->node_instances) { + json_t *val = json_object(); + + json_set_string(val, "enonce1", enonce1); + json_set_string(val, "nonce", nonce); + json_set_string(val, "nonce2", nonce2); + json_set_uint32(val, "ntime32", ntime32); + json_set_uint32(val, "version_mask", version_mask); + json_set_int64(val, "jobid", jobid); + json_set_double(val, "diff", diff); + add_remote_blockdata(ckp, val, cblen, coinbase, data); + send_nodes_block(sdata, val, client_id); + json_decref(val); + } +} + +/* Process a block into a message for the generator to submit. Must hold + * workbase readcount */ +static char * +process_block(const workbase_t *wb, const char *coinbase, const int cblen, + const uchar *data, const uchar *hash, uchar *flip32, char *blockhash) +{ + char *gbt_block, varint[12]; + int txns = wb->txns + 1; + char hexcoinbase[1024]; + + flip_32(flip32, hash); + __bin2hex(blockhash, flip32, 32); + + /* Message format: "data" */ + gbt_block = ckzalloc(1024); + __bin2hex(gbt_block, data, 80); + if (txns < 0xfd) { + uint8_t val8 = txns; + + __bin2hex(varint, (const unsigned char *)&val8, 1); + } else if (txns <= 0xffff) { + uint16_t val16 = htole16(txns); + + strcat(gbt_block, "fd"); + __bin2hex(varint, (const unsigned char *)&val16, 2); + } else { + uint32_t val32 = htole32(txns); + + strcat(gbt_block, "fe"); + __bin2hex(varint, (const unsigned char *)&val32, 4); + } + strcat(gbt_block, varint); + __bin2hex(hexcoinbase, coinbase, cblen); + strcat(gbt_block, hexcoinbase); + if (wb->txns) + realloc_strcat(&gbt_block, wb->txn_data); + return gbt_block; +} + +/* Submit block data locally, absorbing and freeing gbt_block */ +static bool local_block_submit(ckpool_t *ckp, 
char *gbt_block, const uchar *flip32, int height) +{ + bool ret = generator_submitblock(ckp, gbt_block); + char heighthash[68] = {}, rhash[68] = {}; + uchar swap256[32]; + + free(gbt_block); + swap_256(swap256, flip32); + __bin2hex(rhash, swap256, 32); + generator_preciousblock(ckp, rhash); + + /* Check failures that may be inconclusive but were submitted via other + * means or accepted due to precious block call. */ + if (!ret) { + /* If the block is accepted locally, it means we may have + * displaced a known block, and are now working on this fork. + * This makes the most sense since if we solve the next block, + * it validates this one as the best chain, orphaning the other + * block. In the case of mainnet, it means we have found a stale + * block and are trying to force ours ahead of the other. In + * a low diff environment we may have successive blocks, and + * this will be the last one solved locally. Trying to optimise + * regtest/testnet will optimise against the mainnet case. */ + if (generator_get_blockhash(ckp, height, heighthash)) { + ret = !strncmp(rhash, heighthash, 64); + LOGWARNING("Hash for forced possibly stale block, height %d confirms block was %s", + height, ret ? 
"ACCEPTED" : "REJECTED"); + } + } + return ret; +} + +static workbase_t *get_workbase(sdata_t *sdata, const int64_t id) +{ + workbase_t *wb; + + ck_wlock(&sdata->workbase_lock); + HASH_FIND_I64(sdata->workbases, &id, wb); + if (wb) + wb->readcount++; + ck_wunlock(&sdata->workbase_lock); + + return wb; +} + +static workbase_t *__find_remote_workbase(sdata_t *sdata, const int64_t id, const int64_t client_id) +{ + int64_t lookup[2] = {id, client_id}; + workbase_t *wb; + + HASH_FIND(hh, sdata->remote_workbases, lookup, sizeof(int64_t) * 2, wb); + return wb; +} + +static workbase_t *get_remote_workbase(sdata_t *sdata, const int64_t id, const int64_t client_id) +{ + workbase_t *wb; + + ck_wlock(&sdata->workbase_lock); + wb = __find_remote_workbase(sdata, id, client_id); + if (wb) { + if (wb->incomplete) + wb = NULL; + else + wb->readcount++; + } + ck_wunlock(&sdata->workbase_lock); + + return wb; +} + +static void put_workbase(sdata_t *sdata, workbase_t *wb) +{ + ck_wlock(&sdata->workbase_lock); + wb->readcount--; + ck_wunlock(&sdata->workbase_lock); +} + +#define put_remote_workbase(sdata, wb) put_workbase(sdata, wb) + +static void block_solve(ckpool_t *ckp, json_t *val); +static void block_reject(json_t *val); + +static void submit_node_block(ckpool_t *ckp, sdata_t *sdata, json_t *val) +{ + char *coinbase = NULL, *enonce1 = NULL, *nonce = NULL, *nonce2 = NULL, *gbt_block, + *coinbasehex, *swaphex; + uchar *enonce1bin = NULL, hash[32], swap[80], flip32[32]; + uint32_t ntime32, version_mask = 0; + char blockhash[68], cdfield[64]; + int enonce1len, cblen; + workbase_t *wb = NULL; + json_t *bval; + double diff; + ts_t ts_now; + int64_t id; + bool ret; + + if (unlikely(!json_get_string(&enonce1, val, "enonce1"))) { + LOGWARNING("Failed to get enonce1 from node method block"); + goto out; + } + if (unlikely(!json_get_string(&nonce, val, "nonce"))) { + LOGWARNING("Failed to get nonce from node method block"); + goto out; + } + if (unlikely(!json_get_string(&nonce2, val, 
"nonce2"))) { + LOGWARNING("Failed to get nonce2 from node method block"); + goto out; + } + if (unlikely(!json_get_uint32(&ntime32, val, "ntime32"))) { + LOGWARNING("Failed to get ntime32 from node method block"); + goto out; + } + if (unlikely(!json_get_int64(&id, val, "jobid"))) { + LOGWARNING("Failed to get jobid from node method block"); + goto out; + } + if (unlikely(!json_get_double(&diff, val, "diff"))) { + LOGWARNING("Failed to get diff from node method block"); + goto out; + } + + if (!json_get_uint32(&version_mask, val, "version_mask")) { + /* No version mask is not fatal, assume it to be zero */ + LOGINFO("No version mask in node method block"); + } + + LOGWARNING("Possible upstream block solve diff %lf !", diff); + + ts_realtime(&ts_now); + sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); + + wb = get_workbase(sdata, id); + if (unlikely(!wb)) { + LOGWARNING("Failed to find workbase with jobid %"PRId64" in node method block", id); + goto out; + } + + /* Get parameters if upstream pool supports them with new format */ + json_get_string(&coinbasehex, val, "coinbasehex"); + json_get_int(&cblen, val, "cblen"); + json_get_string(&swaphex, val, "swaphex"); + if (coinbasehex && cblen && swaphex) { + uchar hash1[32]; + + coinbase = alloca(cblen); + hex2bin(coinbase, coinbasehex, cblen); + hex2bin(swap, swaphex, 80); + sha256(swap, 80, hash1); + sha256(hash1, 32, hash); + } else { + /* Rebuild the old way if we can if the upstream pool is using + * the old format only */ + enonce1len = wb->enonce1constlen + wb->enonce1varlen; + enonce1bin = alloca(enonce1len); + hex2bin(enonce1bin, enonce1, enonce1len); + coinbase = alloca(wb->coinb1len + wb->enonce1constlen + wb->enonce1varlen + wb->enonce2varlen + wb->coinb2len); + /* Fill in the hashes */ + share_diff(coinbase, enonce1bin, wb, nonce2, ntime32, version_mask, nonce, hash, swap, &cblen); + } + + /* Now we have enough to assemble a block */ + gbt_block = process_block(wb, coinbase, cblen, swap, hash, 
flip32, blockhash); + ret = local_block_submit(ckp, gbt_block, flip32, wb->height); + + JSON_CPACK(bval, "{si,ss,ss,sI,ss,ss,si,ss,sI,sf,ss,ss,ss,ss}", + "height", wb->height, + "blockhash", blockhash, + "confirmed", "n", + "workinfoid", wb->id, + "enonce1", enonce1, + "nonce2", nonce2, + "version_mask", version_mask, + "nonce", nonce, + "reward", wb->coinbasevalue, + "diff", diff, + "createdate", cdfield, + "createby", "code", + "createcode", __func__, + "createinet", ckp->serverurl[0]); + put_workbase(sdata, wb); + + if (ret) + block_solve(ckp, bval); + else + block_reject(bval); + + json_decref(bval); +out: + free(nonce2); + free(nonce); + free(enonce1); +} + +static void update_base(sdata_t *sdata, const int prio) +{ + int *uprio; + + /* All uses of block_update are serialised so if we have more + * update_base calls waiting there is no point servicing them unless + * they are high priority. */ + if (prio < GEN_PRIORITY) { + /* Don't queue another routine update if one is already in + * progress. */ + if (cksem_trywait(&sdata->update_sem)) { + LOGINFO("Skipped lowprio update base"); + return; + } + } else + cksem_wait(&sdata->update_sem); + + uprio = ckalloc(sizeof(int)); + *uprio = prio; + ckmsgq_add(sdata->updateq, uprio); +} + +/* Instead of removing the client instance, we add it to a list of recycled + * clients allowing us to reuse it instead of callocing a new one */ +static void __kill_instance(sdata_t *sdata, stratum_instance_t *client) +{ + if (client->proxy) { + client->proxy->bound_clients--; + client->proxy->parent->combined_clients--; + } + free(client->workername); + free(client->password); + free(client->useragent); + memset(client, 0, sizeof(stratum_instance_t)); + DL_APPEND2(sdata->recycled_instances, client, recycled_prev, recycled_next); +} + +/* Called with instance_lock held. Note stats.users is protected by + * instance lock to avoid recursive locking. 
*/ +static void __inc_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) +{ + sdata->stats.workers++; + if (!user->workers++) + sdata->stats.users++; + worker->instance_count++; +} + +static void __dec_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) +{ + sdata->stats.workers--; + if (!--user->workers) + sdata->stats.users--; + worker->instance_count--; +} + +static void __disconnect_session(sdata_t *sdata, const stratum_instance_t *client) +{ + time_t now_t = time(NULL); + session_t *session, *tmp; + + /* Opportunity to age old sessions */ + HASH_ITER(hh, sdata->disconnected_sessions, session, tmp) { + if (now_t - session->added > 600) { + HASH_DEL(sdata->disconnected_sessions, session); + dealloc(session); + sdata->stats.disconnected--; + } + } + + if (!client->enonce1_64 || !client->user_instance || !client->authorised) + return; + HASH_FIND_INT(sdata->disconnected_sessions, &client->session_id, session); + if (session) + return; + session = ckalloc(sizeof(session_t)); + session->enonce1_64 = client->enonce1_64; + session->session_id = client->session_id; + session->client_id = client->id; + session->userid = client->user_id; + session->added = now_t; + strcpy(session->address, client->address); + HASH_ADD_INT(sdata->disconnected_sessions, session_id, session); + sdata->stats.disconnected++; + sdata->disconnected_generated++; +} + +/* Removes a client instance we know is on the stratum_instances list and from + * the user client list if it's been placed on it */ +static void __del_client(sdata_t *sdata, stratum_instance_t *client) +{ + user_instance_t *user = client->user_instance; + + HASH_DEL(sdata->stratum_instances, client); + if (user) { + DL_DELETE2(user->clients, client, user_prev, user_next ); + __dec_worker(sdata, user, client->worker_instance); + } +} + +static void connector_drop_client(ckpool_t *ckp, const int64_t id) +{ + char buf[256]; + + LOGDEBUG("Stratifier requesting connector drop client %"PRId64, 
id); + snprintf(buf, 255, "dropclient=%"PRId64, id); + send_proc(ckp->connector, buf); +} + +static void drop_allclients(ckpool_t *ckp) +{ + stratum_instance_t *client, *tmp; + sdata_t *sdata = ckp->sdata; + int kills = 0; + + ck_wlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmp) { + int64_t client_id = client->id; + + if (!client->ref) { + __del_client(sdata, client); + __kill_instance(sdata, client); + } else + client->dropped = true; + kills++; + connector_drop_client(ckp, client_id); + } + sdata->stats.users = sdata->stats.workers = 0; + ck_wunlock(&sdata->instance_lock); + + if (kills) + LOGNOTICE("Dropped %d instances for dropall request", kills); +} + +/* Copy only the relevant parts of the master sdata for each subproxy */ +static sdata_t *duplicate_sdata(const sdata_t *sdata) +{ + sdata_t *dsdata = ckzalloc(sizeof(sdata_t)); + + dsdata->ckp = sdata->ckp; + + /* Copy the transaction binaries for workbase creation */ + memcpy(dsdata->txnbin, sdata->txnbin, 40); + memcpy(dsdata->dontxnbin, sdata->dontxnbin, 40); + + /* Use the same work queues for all subproxies */ + dsdata->ssends = sdata->ssends; + dsdata->srecvs = sdata->srecvs; + dsdata->sshareq = sdata->sshareq; + dsdata->sauthq = sdata->sauthq; + dsdata->stxnq = sdata->stxnq; + + /* Give the sbuproxy its own workbase list and lock */ + cklock_init(&dsdata->workbase_lock); + cksem_init(&dsdata->update_sem); + cksem_post(&dsdata->update_sem); + return dsdata; +} + +static int64_t prio_sort(proxy_t *a, proxy_t *b) +{ + return (a->priority - b->priority); +} + +/* Masked increment */ +static int64_t masked_inc(int64_t value, int64_t mask) +{ + value &= ~mask; + value++; + value |= mask; + return value; +} + +/* Priority values can be sparse, they do not need to be sequential */ +static void __set_proxy_prio(sdata_t *sdata, proxy_t *proxy, int64_t priority) +{ + proxy_t *tmpa, *tmpb, *exists = NULL; + int64_t mask, next_prio = 0; + + /* Encode the userid as the high bits in 
priority */ + mask = proxy->userid; + mask <<= 32; + priority |= mask; + + /* See if the priority is already in use */ + HASH_ITER(hh, sdata->proxies, tmpa, tmpb) { + if (tmpa->priority > priority) + break; + if (tmpa->priority == priority) { + exists = tmpa; + next_prio = masked_inc(priority, mask); + break; + } + } + /* See if we need to push the priority of everything after exists up */ + HASH_ITER(hh, exists, tmpa, tmpb) { + if (tmpa->priority > next_prio) + break; + tmpa->priority = masked_inc(tmpa->priority, mask); + next_prio++; + } + proxy->priority = priority; + HASH_SORT(sdata->proxies, prio_sort); +} + +static proxy_t *__generate_proxy(sdata_t *sdata, const int id) +{ + proxy_t *proxy = ckzalloc(sizeof(proxy_t)); + + proxy->parent = proxy; + proxy->id = id; + proxy->sdata = duplicate_sdata(sdata); + proxy->sdata->subproxy = proxy; + proxy->sdata->verbose = true; + /* subid == 0 on parent proxy */ + HASH_ADD(sh, proxy->subproxies, subid, sizeof(int), proxy); + proxy->subproxy_count++; + HASH_ADD_INT(sdata->proxies, id, proxy); + /* Set the initial priority to impossibly high initially as the userid + * has yet to be inherited and the priority should be set only after + * all the proxy details are finalised. 
*/ + proxy->priority = 0x00FFFFFFFFFFFFFF; + HASH_SORT(sdata->proxies, prio_sort); + sdata->proxy_count++; + return proxy; +} + +static proxy_t *__generate_subproxy(sdata_t *sdata, proxy_t *proxy, const int subid) +{ + proxy_t *subproxy = ckzalloc(sizeof(proxy_t)); + + subproxy->parent = proxy; + subproxy->id = proxy->id; + subproxy->subid = subid; + HASH_ADD(sh, proxy->subproxies, subid, sizeof(int), subproxy); + proxy->subproxy_count++; + subproxy->sdata = duplicate_sdata(sdata); + subproxy->sdata->subproxy = subproxy; + return subproxy; +} + +static proxy_t *__existing_proxy(const sdata_t *sdata, const int id) +{ + proxy_t *proxy; + + HASH_FIND_INT(sdata->proxies, &id, proxy); + return proxy; +} + +static proxy_t *existing_proxy(sdata_t *sdata, const int id) +{ + proxy_t *proxy; + + mutex_lock(&sdata->proxy_lock); + proxy = __existing_proxy(sdata, id); + mutex_unlock(&sdata->proxy_lock); + + return proxy; +} + +/* Find proxy by id number, generate one if none exist yet by that id */ +static proxy_t *__proxy_by_id(sdata_t *sdata, const int id) +{ + proxy_t *proxy = __existing_proxy(sdata, id); + + if (unlikely(!proxy)) { + proxy = __generate_proxy(sdata, id); + LOGNOTICE("Stratifier added new proxy %d", id); + } + + return proxy; +} + +static proxy_t *__existing_subproxy(proxy_t *proxy, const int subid) +{ + proxy_t *subproxy; + + HASH_FIND(sh, proxy->subproxies, &subid, sizeof(int), subproxy); + return subproxy; +} + +static proxy_t *__subproxy_by_id(sdata_t *sdata, proxy_t *proxy, const int subid) +{ + proxy_t *subproxy = __existing_subproxy(proxy, subid); + + if (!subproxy) { + subproxy = __generate_subproxy(sdata, proxy, subid); + LOGINFO("Stratifier added new subproxy %d:%d", proxy->id, subid); + } + return subproxy; +} + +static proxy_t *subproxy_by_id(sdata_t *sdata, const int id, const int subid) +{ + proxy_t *proxy, *subproxy; + + mutex_lock(&sdata->proxy_lock); + proxy = __proxy_by_id(sdata, id); + subproxy = __subproxy_by_id(sdata, proxy, subid); + 
mutex_unlock(&sdata->proxy_lock); + + return subproxy; +} + +static proxy_t *existing_subproxy(sdata_t *sdata, const int id, const int subid) +{ + proxy_t *proxy, *subproxy = NULL; + + mutex_lock(&sdata->proxy_lock); + proxy = __existing_proxy(sdata, id); + if (proxy) + subproxy = __existing_subproxy(proxy, subid); + mutex_unlock(&sdata->proxy_lock); + + return subproxy; +} + +static void check_userproxies(sdata_t *sdata, proxy_t *proxy, const int userid); + +static void set_proxy_prio(sdata_t *sdata, proxy_t *proxy, const int priority) +{ + mutex_lock(&sdata->proxy_lock); + __set_proxy_prio(sdata, proxy, priority); + mutex_unlock(&sdata->proxy_lock); + + if (!proxy->global) + check_userproxies(sdata, proxy, proxy->userid); +} + +/* Set proxy to the current proxy and calculate how much headroom it has */ +static int64_t current_headroom(sdata_t *sdata, proxy_t **proxy) +{ + proxy_t *subproxy, *tmp; + int64_t headroom = 0; + + mutex_lock(&sdata->proxy_lock); + *proxy = sdata->proxy; + if (!*proxy) + goto out_unlock; + HASH_ITER(sh, (*proxy)->subproxies, subproxy, tmp) { + if (subproxy->dead) + continue; + headroom += subproxy->max_clients - subproxy->clients; + } +out_unlock: + mutex_unlock(&sdata->proxy_lock); + + return headroom; +} + +/* Returns the headroom available for more clients of the best alive user proxy + * for userid. 
*/ +static int64_t best_userproxy_headroom(sdata_t *sdata, const int userid) +{ + proxy_t *proxy, *subproxy, *tmp, *subtmp; + int64_t headroom = 0; + + mutex_lock(&sdata->proxy_lock); + HASH_ITER(hh, sdata->proxies, proxy, tmp) { + bool alive = false; + + if (proxy->userid < userid) + continue; + if (proxy->userid > userid) + break; + HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { + if (subproxy->dead) + continue; + alive = true; + headroom += subproxy->max_clients - subproxy->clients; + } + /* Proxies are ordered by priority so first available will be + * the best priority */ + if (alive) + break; + } + mutex_unlock(&sdata->proxy_lock); + + return headroom; +} + +static void reconnect_client(sdata_t *sdata, stratum_instance_t *client); + +static void generator_recruit(ckpool_t *ckp, const int proxyid, const int recruits) +{ + char buf[256]; + + sprintf(buf, "recruit=%d:%d", proxyid, recruits); + LOGINFO("Stratifer requesting %d more subproxies of proxy %d from generator", + recruits, proxyid); + send_proc(ckp->generator,buf); +} + +/* Find how much headroom we have and connect up to that many clients that are + * not currently on this pool, recruiting more slots to switch more clients + * later on lazily. Only reconnect clients bound to global proxies. */ +static void reconnect_global_clients(sdata_t *sdata) +{ + stratum_instance_t *client, *tmpclient; + int reconnects = 0; + int64_t headroom; + proxy_t *proxy; + + headroom = current_headroom(sdata, &proxy); + if (!proxy) + return; + + ck_rlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmpclient) { + if (client->dropped) + continue; + if (!client->authorised) + continue; + /* Is this client bound to a dead proxy? 
*/ + if (!client->reconnect) { + /* This client is bound to a user proxy */ + if (client->proxy->userid) + continue; + if (client->proxyid == proxy->id) + continue; + } + if (headroom-- < 1) + continue; + reconnects++; + reconnect_client(sdata, client); + } + ck_runlock(&sdata->instance_lock); + + if (reconnects) { + LOGINFO("%d clients flagged for reconnect to global proxy %d", + reconnects, proxy->id); + } + if (headroom < 0) + generator_recruit(sdata->ckp, proxy->id, -headroom); +} + +static bool __subproxies_alive(proxy_t *proxy) +{ + proxy_t *subproxy, *tmp; + bool alive = false; + + HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { + if (!subproxy->dead) { + alive = true; + break; + } + } + return alive; +} + +/* Iterate over the current global proxy list and see if the current one is + * the highest priority alive one. Proxies are sorted by priority so the first + * available will be highest priority. Uses ckp sdata */ +static void check_bestproxy(sdata_t *sdata) +{ + proxy_t *proxy, *tmp; + int changed_id = -1; + + mutex_lock(&sdata->proxy_lock); + if (sdata->proxy && !__subproxies_alive(sdata->proxy)) + sdata->proxy = NULL; + HASH_ITER(hh, sdata->proxies, proxy, tmp) { + if (!__subproxies_alive(proxy)) + continue; + if (!proxy->global) + break; + if (proxy != sdata->proxy) { + sdata->proxy = proxy; + changed_id = proxy->id; + } + break; + } + mutex_unlock(&sdata->proxy_lock); + + if (changed_id != -1) + LOGNOTICE("Stratifier setting active proxy to %d", changed_id); +} + +static proxy_t *best_proxy(sdata_t *sdata) +{ + proxy_t *proxy; + + mutex_lock(&sdata->proxy_lock); + proxy = sdata->proxy; + mutex_unlock(&sdata->proxy_lock); + + return proxy; +} + +static void check_globalproxies(sdata_t *sdata, proxy_t *proxy) +{ + check_bestproxy(sdata); + if (proxy->parent == best_proxy(sdata)->parent) + reconnect_global_clients(sdata); +} + +static void check_proxy(sdata_t *sdata, proxy_t *proxy) +{ + if (proxy->global) + check_globalproxies(sdata, proxy); + else + 
check_userproxies(sdata, proxy, proxy->userid); +} + +static void dead_proxyid(sdata_t *sdata, const int id, const int subid, const bool replaced, const bool deleted) +{ + stratum_instance_t *client, *tmp; + int reconnects = 0, proxyid = 0; + int64_t headroom; + proxy_t *proxy; + + proxy = existing_subproxy(sdata, id, subid); + if (proxy) { + proxy->dead = true; + proxy->deleted = deleted; + if (!replaced && proxy->global) + check_bestproxy(sdata); + } + LOGINFO("Stratifier dropping clients from proxy %d:%d", id, subid); + headroom = current_headroom(sdata, &proxy); + if (proxy) + proxyid = proxy->id; + + ck_rlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmp) { + if (client->proxyid != id || client->subproxyid != subid) + continue; + /* Clients could remain connected to a dead connection here + * but should be picked up when we recruit enough slots after + * another notify. */ + if (headroom-- < 1) { + client->reconnect = true; + continue; + } + reconnects++; + reconnect_client(sdata, client); + } + ck_runlock(&sdata->instance_lock); + + if (reconnects) { + LOGINFO("%d clients flagged to reconnect from dead proxy %d:%d", reconnects, + id, subid); + } + /* When a proxy dies, recruit more of the global proxies for them to + * fail over to in case user proxies are unavailable. 
*/ + if (headroom < 0) + generator_recruit(sdata->ckp, proxyid, -headroom); +} + +static void update_subscribe(ckpool_t *ckp, const char *cmd) +{ + sdata_t *sdata = ckp->sdata, *dsdata; + int id = 0, subid = 0, userid = 0; + proxy_t *proxy, *old = NULL; + const char *buf; + bool global; + json_t *val; + + if (unlikely(strlen(cmd) < 11)) { + LOGWARNING("Received zero length string for subscribe in update_subscribe"); + return; + } + buf = cmd + 10; + LOGDEBUG("Update subscribe: %s", buf); + val = json_loads(buf, 0, NULL); + if (unlikely(!val)) { + LOGWARNING("Failed to json decode subscribe response in update_subscribe %s", buf); + return; + } + if (unlikely(!json_get_int(&id, val, "proxy"))) { + LOGWARNING("Failed to json decode proxy value in update_subscribe %s", buf); + return; + } + if (unlikely(!json_get_int(&subid, val, "subproxy"))) { + LOGWARNING("Failed to json decode subproxy value in update_subscribe %s", buf); + return; + } + if (unlikely(!json_get_bool(&global, val, "global"))) { + LOGWARNING("Failed to json decode global value in update_subscribe %s", buf); + return; + } + if (!global) { + if (unlikely(!json_get_int(&userid, val, "userid"))) { + LOGWARNING("Failed to json decode userid value in update_subscribe %s", buf); + return; + } + } + + if (!subid) + LOGNOTICE("Got updated subscribe for proxy %d", id); + else + LOGINFO("Got updated subscribe for proxy %d:%d", id, subid); + + /* Is this a replacement for an existing proxy id? 
*/ + old = existing_subproxy(sdata, id, subid); + if (old) { + dead_proxyid(sdata, id, subid, true, false); + proxy = old; + proxy->dead = false; + } else /* This is where all new proxies are created */ + proxy = subproxy_by_id(sdata, id, subid); + proxy->global = global; + proxy->userid = userid; + proxy->subscribed = true; + proxy->diff = ckp->startdiff; + memset(proxy->baseurl, 0, 128); + memset(proxy->url, 0, 128); + memset(proxy->auth, 0, 128); + memset(proxy->pass, 0, 128); + strncpy(proxy->baseurl, json_string_value(json_object_get(val, "baseurl")), 127); + strncpy(proxy->url, json_string_value(json_object_get(val, "url")), 127); + strncpy(proxy->auth, json_string_value(json_object_get(val, "auth")), 127); + strncpy(proxy->pass, json_string_value(json_object_get(val, "pass")), 127); + + dsdata = proxy->sdata; + + ck_wlock(&dsdata->workbase_lock); + /* Length is checked by generator */ + strcpy(proxy->enonce1, json_string_value(json_object_get(val, "enonce1"))); + proxy->enonce1constlen = strlen(proxy->enonce1) / 2; + hex2bin(proxy->enonce1bin, proxy->enonce1, proxy->enonce1constlen); + proxy->nonce2len = json_integer_value(json_object_get(val, "nonce2len")); + if (ckp->nonce2length) { + proxy->enonce1varlen = proxy->nonce2len - ckp->nonce2length; + if (proxy->enonce1varlen < 0) + proxy->enonce1varlen = 0; + } else if (proxy->nonce2len > 7) + proxy->enonce1varlen = 4; + else if (proxy->nonce2len > 5) + proxy->enonce1varlen = 2; + else if (proxy->nonce2len > 3) + proxy->enonce1varlen = 1; + else + proxy->enonce1varlen = 0; + proxy->enonce2varlen = proxy->nonce2len - proxy->enonce1varlen; + proxy->max_clients = 1ll << (proxy->enonce1varlen * 8); + proxy->clients = 0; + ck_wunlock(&dsdata->workbase_lock); + + if (subid) { + LOGINFO("Upstream pool %s %d:%d extranonce2 length %d, max proxy clients %"PRId64, + proxy->url, id, subid, proxy->nonce2len, proxy->max_clients); + } else { + LOGNOTICE("Upstream pool %s %d extranonce2 length %d, max proxy clients %"PRId64, 
+ proxy->url, id, proxy->nonce2len, proxy->max_clients); + } + if (ckp->nonce2length && proxy->enonce2varlen != ckp->nonce2length) + LOGWARNING("Only able to set nonce2len %d of requested %d on proxy %d:%d", + proxy->enonce2varlen, ckp->nonce2length, id, subid); + json_decref(val); + + /* Set the priority on a new proxy now that we have all the fields + * filled in to push it to its correct priority position in the + * hashlist. */ + if (!old) + set_proxy_prio(sdata, proxy, id); + + check_proxy(sdata, proxy); +} + +/* Find the highest priority alive proxy belonging to userid and recruit extra + * subproxies. */ +static void recruit_best_userproxy(sdata_t *sdata, const int userid, const int recruits) +{ + proxy_t *proxy, *subproxy, *tmp, *subtmp; + int id = -1; + + mutex_lock(&sdata->proxy_lock); + HASH_ITER(hh, sdata->proxies, proxy, tmp) { + if (proxy->userid < userid) + continue; + if (proxy->userid > userid) + break; + HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { + if (subproxy->dead) + continue; + id = proxy->id; + } + } + mutex_unlock(&sdata->proxy_lock); + + if (id != -1) + generator_recruit(sdata->ckp, id, recruits); +} + +/* Check how much headroom the userid proxies have and reconnect any clients + * that are not bound to it that should be */ +static void check_userproxies(sdata_t *sdata, proxy_t *proxy, const int userid) +{ + int64_t headroom = best_userproxy_headroom(sdata, userid); + stratum_instance_t *client, *tmpclient; + int reconnects = 0; + + ck_rlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmpclient) { + if (client->dropped) + continue; + if (!client->authorised) + continue; + if (client->user_id != userid) + continue; + /* Is the client already bound to a proxy of its own userid of + * a higher priority than this one. 
*/ + if (client->proxy->userid == userid && + client->proxy->parent->priority <= proxy->parent->priority) + continue; + if (headroom-- < 1) + continue; + reconnects++; + reconnect_client(sdata, client); + } + ck_runlock(&sdata->instance_lock); + + if (reconnects) { + LOGINFO("%d clients flagged for reconnect to user %d proxies", + reconnects, userid); + } + if (headroom < 0) + recruit_best_userproxy(sdata, userid, -headroom); +} + +static void update_notify(ckpool_t *ckp, const char *cmd) +{ + sdata_t *sdata = ckp->sdata, *dsdata; + bool new_block = false, clean; + int i, id = 0, subid = 0; + char header[272]; + const char *buf; + proxy_t *proxy; + workbase_t *wb; + json_t *val; + + if (unlikely(strlen(cmd) < 8)) { + LOGWARNING("Zero length string passed to update_notify"); + return; + } + buf = cmd + 7; /* "notify=" */ + LOGDEBUG("Update notify: %s", buf); + + val = json_loads(buf, 0, NULL); + if (unlikely(!val)) { + LOGWARNING("Failed to json decode in update_notify"); + return; + } + json_get_int(&id, val, "proxy"); + json_get_int(&subid, val, "subproxy"); + proxy = existing_subproxy(sdata, id, subid); + if (unlikely(!proxy || !proxy->subscribed)) { + LOGINFO("No valid proxy %d:%d subscription to update notify yet", id, subid); + goto out; + } + LOGINFO("Got updated notify for proxy %d:%d", id, subid); + + wb = ckzalloc(sizeof(workbase_t)); + wb->ckp = ckp; + wb->proxy = true; + + json_get_int64(&wb->id, val, "jobid"); + json_strcpy(wb->prevhash, val, "prevhash"); + json_intcpy(&wb->coinb1len, val, "coinb1len"); + wb->coinb1bin = ckalloc(wb->coinb1len); + wb->coinb1 = ckalloc(wb->coinb1len * 2 + 1); + json_strcpy(wb->coinb1, val, "coinbase1"); + hex2bin(wb->coinb1bin, wb->coinb1, wb->coinb1len); + wb->height = get_sernumber(wb->coinb1bin + 42); + json_strdup(&wb->coinb2, val, "coinbase2"); + wb->coinb2len = strlen(wb->coinb2) / 2; + wb->coinb2bin = ckalloc(wb->coinb2len); + hex2bin(wb->coinb2bin, wb->coinb2, wb->coinb2len); + wb->merkle_array = 
json_object_dup(val, "merklehash"); + wb->merkles = json_array_size(wb->merkle_array); + for (i = 0; i < wb->merkles; i++) { + strcpy(&wb->merklehash[i][0], json_string_value(json_array_get(wb->merkle_array, i))); + hex2bin(&wb->merklebin[i][0], &wb->merklehash[i][0], 32); + } + json_strcpy(wb->bbversion, val, "bbversion"); + json_strcpy(wb->nbit, val, "nbit"); + json_strcpy(wb->ntime, val, "ntime"); + sscanf(wb->ntime, "%x", &wb->ntime32); + clean = json_is_true(json_object_get(val, "clean")); + ts_realtime(&wb->gentime); + snprintf(header, 270, "%s%s%s%s%s%s%s", + wb->bbversion, wb->prevhash, + "0000000000000000000000000000000000000000000000000000000000000000", + wb->ntime, wb->nbit, + "00000000", /* nonce */ + workpadding); + header[224] = 0; + LOGDEBUG("Header: %s", header); + hex2bin(wb->headerbin, header, 112); + wb->txn_hashes = ckzalloc(1); + + dsdata = proxy->sdata; + + ck_rlock(&dsdata->workbase_lock); + strcpy(wb->enonce1const, proxy->enonce1); + wb->enonce1constlen = proxy->enonce1constlen; + memcpy(wb->enonce1constbin, proxy->enonce1bin, wb->enonce1constlen); + wb->enonce1varlen = proxy->enonce1varlen; + wb->enonce2varlen = proxy->enonce2varlen; + wb->diff = proxy->diff; + ck_runlock(&dsdata->workbase_lock); + + add_base(ckp, dsdata, wb, &new_block); + if (new_block) { + if (subid) + LOGINFO("Block hash on proxy %d:%d changed to %s", id, subid, dsdata->lastswaphash); + else + LOGNOTICE("Block hash on proxy %d changed to %s", id, dsdata->lastswaphash); + } + + check_proxy(sdata, proxy); + clean |= new_block; + LOGINFO("Proxy %d:%d broadcast updated stratum notify with%s clean", id, + subid, clean ? 
"" : "out"); + stratum_broadcast_update(dsdata, wb, clean); +out: + json_decref(val); +} + +static void stratum_send_diff(sdata_t *sdata, const stratum_instance_t *client); + +static void update_diff(ckpool_t *ckp, const char *cmd) +{ + sdata_t *sdata = ckp->sdata, *dsdata; + stratum_instance_t *client, *tmp; + double old_diff, diff; + int id = 0, subid = 0; + const char *buf; + proxy_t *proxy; + json_t *val; + + if (unlikely(strlen(cmd) < 6)) { + LOGWARNING("Zero length string passed to update_diff"); + return; + } + buf = cmd + 5; /* "diff=" */ + LOGDEBUG("Update diff: %s", buf); + + val = json_loads(buf, 0, NULL); + if (unlikely(!val)) { + LOGWARNING("Failed to json decode in update_diff"); + return; + } + json_get_int(&id, val, "proxy"); + json_get_int(&subid, val, "subproxy"); + json_dblcpy(&diff, val, "diff"); + json_decref(val); + + LOGINFO("Got updated diff for proxy %d:%d", id, subid); + proxy = existing_subproxy(sdata, id, subid); + if (!proxy) { + LOGINFO("No existing subproxy %d:%d to update diff", id, subid); + return; + } + + /* Support fractional difficulty values. Set minimum to a very small + * positive value to prevent division by zero. */ + if (unlikely(diff < 0.000001)) + diff = 0.000001; + + dsdata = proxy->sdata; + + if (unlikely(!dsdata->current_workbase)) { + LOGINFO("No current workbase to update diff yet"); + return; + } + + ck_wlock(&dsdata->workbase_lock); + old_diff = proxy->diff; + dsdata->current_workbase->diff = proxy->diff = diff; + ck_wunlock(&dsdata->workbase_lock); + + if (old_diff < diff) + return; + + /* If the diff has dropped, iterate over all the clients and check + * they're at or below the new diff, and update it if not. 
*/ + ck_rlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmp) { + if (client->proxyid != id) + continue; + if (client->subproxyid != subid) + continue; + if (client->diff > diff) { + client->diff = diff; + stratum_send_diff(sdata, client); + } + } + ck_runlock(&sdata->instance_lock); +} + +#if 0 +static void generator_drop_proxy(ckpool_t *ckp, const int64_t id, const int subid) +{ + char msg[256]; + + sprintf(msg, "dropproxy=%ld:%d", id, subid); + send_proc(ckp->generator,msg); +} +#endif + +static void free_proxy(ckpool_t *ckp, proxy_t *proxy) +{ + sdata_t *dsdata = proxy->sdata; + + /* Delete any shares in the proxy's hashtable. */ + if (dsdata) { + share_t *share, *tmpshare; + workbase_t *wb, *tmpwb; + + mutex_lock(&dsdata->share_lock); + HASH_ITER(hh, dsdata->shares, share, tmpshare) { + HASH_DEL(dsdata->shares, share); + dealloc(share); + } + mutex_unlock(&dsdata->share_lock); + + /* Do we need to check readcount here if freeing the proxy? */ + ck_wlock(&dsdata->workbase_lock); + HASH_ITER(hh, dsdata->workbases, wb, tmpwb) { + HASH_DEL(dsdata->workbases, wb); + clear_workbase(ckp, wb); + } + ck_wunlock(&dsdata->workbase_lock); + } + + free(proxy->sdata); + free(proxy); +} + +/* Remove subproxies that are flagged dead. Then see if there + * are any retired proxies that no longer have any other subproxies and reap + * those. 
*/ +static void reap_proxies(ckpool_t *ckp, sdata_t *sdata) +{ + proxy_t *proxy, *proxytmp, *subproxy, *subtmp; + int dead = 0; + + if (!ckp->proxy) + return; + + mutex_lock(&sdata->proxy_lock); + HASH_ITER(hh, sdata->proxies, proxy, proxytmp) { + HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { + if (!subproxy->bound_clients && !subproxy->dead) { + /* Reset the counter to reuse this proxy */ + subproxy->clients = 0; + continue; + } + if (proxy == subproxy) + continue; + if (subproxy->bound_clients) + continue; + if (!subproxy->dead) + continue; + if (unlikely(!subproxy->subid)) { + LOGWARNING("Unexepectedly found proxy %d:%d as subproxy of %d:%d", + subproxy->id, subproxy->subid, proxy->id, proxy->subid); + continue; + } + if (unlikely(subproxy == sdata->proxy)) { + LOGWARNING("Unexepectedly found proxy %d:%d as current", + subproxy->id, subproxy->subid); + continue; + } + dead++; + HASH_DELETE(sh, proxy->subproxies, subproxy); + proxy->subproxy_count--; + free_proxy(ckp, subproxy); + } + /* Should we reap the parent proxy too?*/ + if (!proxy->deleted || proxy->subproxy_count > 1 || proxy->bound_clients) + continue; + HASH_DELETE(sh, proxy->subproxies, proxy); + HASH_DELETE(hh, sdata->proxies, proxy); + free_proxy(ckp, proxy); + } + mutex_unlock(&sdata->proxy_lock); + + if (dead) + LOGINFO("Stratifier discarded %d dead proxies", dead); +} + +/* Enter with instance_lock held */ +static stratum_instance_t *__instance_by_id(sdata_t *sdata, const int64_t id) +{ + stratum_instance_t *client; + + HASH_FIND_I64(sdata->stratum_instances, &id, client); + return client; +} + +/* Increase the reference count of instance */ +static void __inc_instance_ref(stratum_instance_t *client) +{ + client->ref++; +} + +/* Find an __instance_by_id and increase its reference count allowing us to + * use this instance outside of instance_lock without fear of it being + * dereferenced. Does not return dropped clients still on the list. 
*/ +static inline stratum_instance_t *ref_instance_by_id(sdata_t *sdata, const int64_t id) +{ + stratum_instance_t *client; + + ck_wlock(&sdata->instance_lock); + client = __instance_by_id(sdata, id); + if (client) { + if (unlikely(client->dropped)) + client = NULL; + else + __inc_instance_ref(client); + } + ck_wunlock(&sdata->instance_lock); + + return client; +} + +static void __drop_client(sdata_t *sdata, stratum_instance_t *client, bool lazily, char **msg) +{ + user_instance_t *user = client->user_instance; + + if (unlikely(client->node)) + DL_DELETE2(sdata->node_instances, client, node_prev, node_next); + else if (unlikely(client->trusted)) + DL_DELETE2(sdata->remote_instances, client, remote_prev, remote_next); + + if (client->workername) { + if (user) { + /* No message anywhere if throttled, too much flood and + * these only can be LOGNOTICE messages. + */ + if (!user->throttled) { + ASPRINTF(msg, "Dropped client %s %s user %s worker %s %s", + client->identity, client->address, + user->username, client->workername, lazily ? "lazily" : ""); + } + } else { + ASPRINTF(msg, "Dropped client %s %s no user worker %s %s", + client->identity, client->address, client->workername, + lazily ? "lazily" : ""); + } + } else { + /* Workerless client. Too noisy to log them all */ + } + __del_client(sdata, client); + __kill_instance(sdata, client); +} + +static int __dec_instance_ref(stratum_instance_t *client) +{ + return --client->ref; +} + +/* Decrease the reference count of instance. */ +static void _dec_instance_ref(sdata_t *sdata, stratum_instance_t *client, const char *file, + const char *func, const int line) +{ + char_entry_t *entries = NULL; + bool dropped = false; + char *msg = NULL; + int ref; + + ck_wlock(&sdata->instance_lock); + ref = __dec_instance_ref(client); + /* See if there are any instances that were dropped that could not be + * moved due to holding a reference and drop them now. 
*/ + if (unlikely(client->dropped && !ref)) { + dropped = true; + __drop_client(sdata, client, true, &msg); + if (msg) + add_msg_entry(&entries, &msg); + } + ck_wunlock(&sdata->instance_lock); + + if (entries) + notice_msg_entries(&entries); + /* This should never happen */ + if (unlikely(ref < 0)) + LOGERR("Instance ref count dropped below zero from %s %s:%d", file, func, line); + + if (dropped) + reap_proxies(sdata->ckp, sdata); +} + +#define dec_instance_ref(sdata, instance) _dec_instance_ref(sdata, instance, __FILE__, __func__, __LINE__) + +/* If we have a no longer used stratum instance in the recycled linked list, + * use that, otherwise calloc a fresh one. */ +static stratum_instance_t *__recruit_stratum_instance(sdata_t *sdata) +{ + stratum_instance_t *client = sdata->recycled_instances; + + if (client) + DL_DELETE2(sdata->recycled_instances, client, recycled_prev, recycled_next); + else { + client = ckzalloc(sizeof(stratum_instance_t)); + sdata->stratum_generated++; + } + return client; +} + +/* Enter with write instance_lock held, drops and grabs it again */ +static stratum_instance_t *__stratum_add_instance(ckpool_t *ckp, int64_t id, const char *address, + int server) +{ + sdata_t *sdata = ckp->sdata; + stratum_instance_t *client; + int64_t pass_id; + + client = __recruit_stratum_instance(sdata); + ck_wunlock(&sdata->instance_lock); + + client->start_time = time(NULL); + client->id = id; + client->session_id = ++sdata->session_id; + strcpy(client->address, address); + /* Sanity check to not overflow lookup in ckp->serverurl[] */ + if (server >= ckp->serverurls) + server = 0; + client->server = server; + client->diff = client->old_diff = ckp->startdiff; + if (ckp->server_highdiff && ckp->server_highdiff[server]) { + client->suggest_diff = ckp->highdiff; + if (client->suggest_diff > client->diff) + client->diff = client->old_diff = client->suggest_diff; + } + client->ckp = ckp; + tv_time(&client->ldc); + /* Points to ckp sdata in ckpool mode, but is 
changed later in proxy + * mode . */ + client->sdata = sdata; + if ((pass_id = subclient(id))) { + stratum_instance_t *remote = __instance_by_id(sdata, pass_id); + + id &= 0xffffffffll; + if (remote && remote->node) { + client->latency = remote->latency; + LOGINFO("Client %s inherited node latency of %d", + client->identity, client->latency); + sprintf(client->identity, "node:%"PRId64" subclient:%"PRId64, + pass_id, id); + } else if (remote && remote->trusted) { + sprintf(client->identity, "remote:%"PRId64" subclient:%"PRId64, + pass_id, id); + } else { /* remote->passthrough remaining */ + sprintf(client->identity, "passthrough:%"PRId64" subclient:%"PRId64, + pass_id, id); + } + client->virtualid = connector_newclientid(ckp); + } else { + sprintf(client->identity, "%"PRId64, id); + client->virtualid = id; + } + + ck_wlock(&sdata->instance_lock); + HASH_ADD_I64(sdata->stratum_instances, id, client); + return client; +} + +static uint64_t disconnected_sessionid_exists(sdata_t *sdata, const int session_id, + const int64_t id) +{ + session_t *session; + int64_t old_id = 0; + uint64_t ret = 0; + + ck_wlock(&sdata->instance_lock); + HASH_FIND_INT(sdata->disconnected_sessions, &session_id, session); + if (!session) + goto out_unlock; + HASH_DEL(sdata->disconnected_sessions, session); + sdata->stats.disconnected--; + ret = session->enonce1_64; + old_id = session->client_id; + dealloc(session); +out_unlock: + ck_wunlock(&sdata->instance_lock); + + if (ret) + LOGINFO("Reconnecting old instance %"PRId64" to instance %"PRId64, old_id, id); + return ret; +} + +static inline bool client_active(stratum_instance_t *client) +{ + return (client->authorised && !client->dropped); +} + +static inline bool remote_server(stratum_instance_t *client) +{ + return (client->node || client->passthrough || client->trusted); +} + +/* Ask the connector asynchronously to send us dropclient commands if this + * client no longer exists. 
*/ +static void connector_test_client(ckpool_t *ckp, const int64_t id) +{ + char buf[256]; + + LOGDEBUG("Stratifier requesting connector test client %"PRId64, id); + snprintf(buf, 255, "testclient=%"PRId64, id); + send_proc(ckp->connector, buf); +} + +/* For creating a list of sends without locking that can then be concatenated + * to the stratum_sends list. Minimises locking and avoids taking recursive + * locks. Sends only to sdata bound clients (everyone in ckpool) */ +static void stratum_broadcast(sdata_t *sdata, json_t *val, const int msg_type) +{ + ckpool_t *ckp = sdata->ckp; + sdata_t *ckp_sdata = ckp->sdata; + stratum_instance_t *client, *tmp; + ckmsg_t *bulk_send = NULL; + int messages = 0; + + if (unlikely(!val)) { + LOGERR("Sent null json to stratum_broadcast"); + return; + } + + if (ckp->node) { + json_decref(val); + return; + } + + ck_rlock(&ckp_sdata->instance_lock); + HASH_ITER(hh, ckp_sdata->stratum_instances, client, tmp) { + ckmsg_t *client_msg; + smsg_t *msg; + + if (sdata != ckp_sdata && client->sdata != sdata) + continue; + + if (!client_active(client) || remote_server(client)) + continue; + + /* Only send messages to whitelisted clients */ + if (msg_type == SM_MSG && !client->messages) + continue; + + client_msg = ckalloc(sizeof(ckmsg_t)); + msg = ckzalloc(sizeof(smsg_t)); + if (subclient(client->id)) + json_set_string(val, "node.method", stratum_msgs[msg_type]); + msg->json_msg = json_deep_copy(val); + msg->client_id = client->id; + client_msg->data = msg; + DL_APPEND(bulk_send, client_msg); + messages++; + } + ck_runlock(&ckp_sdata->instance_lock); + + json_decref(val); + + if (likely(bulk_send)) + ssend_bulk_append(sdata, bulk_send, messages); +} + +static void stratum_add_send(sdata_t *sdata, json_t *val, const int64_t client_id, + const int msg_type) +{ + ckpool_t *ckp = sdata->ckp; + int64_t remote_id; + smsg_t *msg; + + if (ckp->node) { + /* Node shouldn't be sending any messages as it only uses the + * stratifier for monitoring 
activity. */ + json_decref(val); + return; + } + + if ((remote_id = subclient(client_id))) { + stratum_instance_t *remote = ref_instance_by_id(sdata, remote_id); + + if (unlikely(!remote)) { + json_decref(val); + return; + } + if (remote->trusted) + json_set_string(val, "method", stratum_msgs[msg_type]); + else /* Both remote->node and remote->passthrough */ + json_set_string(val, "node.method", stratum_msgs[msg_type]); + dec_instance_ref(sdata, remote); + } + LOGDEBUG("Sending stratum message %s", stratum_msgs[msg_type]); + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = val; + msg->client_id = client_id; + if (likely(ckmsgq_add(sdata->ssends, msg))) + return; + json_decref(msg->json_msg); + free(msg); +} + +static void drop_client(ckpool_t *ckp, sdata_t *sdata, const int64_t id) +{ + char_entry_t *entries = NULL; + stratum_instance_t *client; + char *msg = NULL; + + LOGINFO("Stratifier asked to drop client %"PRId64, id); + + ck_wlock(&sdata->instance_lock); + client = __instance_by_id(sdata, id); + if (client && !client->dropped) { + __disconnect_session(sdata, client); + /* If the client is still holding a reference, don't drop them + * now but wait till the reference is dropped */ + if (!client->ref) { + __drop_client(sdata, client, false, &msg); + if (msg) + add_msg_entry(&entries, &msg); + } else + client->dropped = true; + } + ck_wunlock(&sdata->instance_lock); + + if (entries) + notice_msg_entries(&entries); + reap_proxies(ckp, sdata); +} + +static void stratum_broadcast_message(sdata_t *sdata, const char *msg) +{ + json_t *json_msg; + + JSON_CPACK(json_msg, "{sosss[s]}", "id", json_null(), "method", "client.show_message", + "params", msg); + stratum_broadcast(sdata, json_msg, SM_MSG); +} + +/* Send a generic reconnect to all clients without parameters to make them + * reconnect to the same server. 
*/ +static void request_reconnect(sdata_t *sdata, const char *cmd) +{ + char *port = strdupa(cmd), *url = NULL; + stratum_instance_t *client, *tmp; + json_t *json_msg; + + strsep(&port, ":"); + if (port) + url = strsep(&port, ","); + if (url && port) { + JSON_CPACK(json_msg, "{sosss[ssi]}", "id", json_null(), "method", "client.reconnect", + "params", url, port, 0); + } else + JSON_CPACK(json_msg, "{sosss[]}", "id", json_null(), "method", "client.reconnect", + "params"); + stratum_broadcast(sdata, json_msg, SM_RECONNECT); + + /* Tag all existing clients as dropped now so they can be removed + * lazily */ + ck_wlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmp) { + client->dropped = true; + } + ck_wunlock(&sdata->instance_lock); +} + +static void reset_bestshares(sdata_t *sdata) +{ + user_instance_t *user, *tmpuser; + stratum_instance_t *client, *tmp; + + /* Can do this unlocked since it's just zeroing the values */ + sdata->stats.accounted_diff_shares = + sdata->stats.accounted_shares = + sdata->stats.accounted_rejects = 0; + sdata->stats.best_diff = 0; + + ck_rlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->stratum_instances, client, tmp) { + client->best_diff = 0; + } + HASH_ITER(hh, sdata->user_instances, user, tmpuser) { + worker_instance_t *worker; + + user->best_diff = 0; + DL_FOREACH(user->worker_instances, worker) { + worker->best_diff = 0; + } + } + ck_runlock(&sdata->instance_lock); +} + +static user_instance_t *get_user(sdata_t *sdata, const char *username); + +static user_instance_t *user_by_workername(sdata_t *sdata, const char *workername) +{ + char *username = strdupa(workername), *ignore; + user_instance_t *user; + + ignore = username; + strsep(&ignore, "._"); + + /* Find the user first */ + user = get_user(sdata, username); + return user; +} + +static worker_instance_t *get_worker(sdata_t *sdata, user_instance_t *user, const char *workername); + +static json_t *worker_stats(const worker_instance_t *worker) +{ + 
char suffix1[16], suffix5[16], suffix60[16], suffix1440[16], suffix10080[16]; + json_t *val; + double ghs; + + ghs = worker->dsps1 * nonces; + suffix_string(ghs, suffix1, 16, 0); + + ghs = worker->dsps5 * nonces; + suffix_string(ghs, suffix5, 16, 0); + + ghs = worker->dsps60 * nonces; + suffix_string(ghs, suffix60, 16, 0); + + ghs = worker->dsps1440 * nonces; + suffix_string(ghs, suffix1440, 16, 0); + + ghs = worker->dsps10080 * nonces; + suffix_string(ghs, suffix10080, 16, 0); + + JSON_CPACK(val, "{ss,ss,ss,ss,ss}", + "hashrate1m", suffix1, + "hashrate5m", suffix5, + "hashrate1hr", suffix60, + "hashrate1d", suffix1440, + "hashrate7d", suffix10080); + return val; +} + +static json_t *user_stats(const user_instance_t *user) +{ + char suffix1[16], suffix5[16], suffix60[16], suffix1440[16], suffix10080[16]; + json_t *val; + double ghs; + + ghs = user->dsps1 * nonces; + suffix_string(ghs, suffix1, 16, 0); + + ghs = user->dsps5 * nonces; + suffix_string(ghs, suffix5, 16, 0); + + ghs = user->dsps60 * nonces; + suffix_string(ghs, suffix60, 16, 0); + + ghs = user->dsps1440 * nonces; + suffix_string(ghs, suffix1440, 16, 0); + + ghs = user->dsps10080 * nonces; + suffix_string(ghs, suffix10080, 16, 0); + + JSON_CPACK(val, "{ss,ss,ss,ss,ss,sI,sI}", + "hashrate1m", suffix1, + "hashrate5m", suffix5, + "hashrate1hr", suffix60, + "hashrate1d", suffix1440, + "hashrate7d", suffix10080, + "shares", user->shares, + "authorised", user->auth_time); + return val; +} + +/* Adjust workinfo id to virtual value for remote trusted workinfos */ +static void remap_workinfo_id(sdata_t *sdata, json_t *val, const int64_t client_id) +{ + int64_t mapped_id, id; + workbase_t *wb; + + json_get_int64(&id, val, "workinfoid"); + + ck_rlock(&sdata->workbase_lock); + wb = __find_remote_workbase(sdata, id, client_id); + if (likely(wb)) + mapped_id = wb->mapped_id; + else + mapped_id = id; + ck_runlock(&sdata->workbase_lock); + + /* Replace value with mapped id */ + json_set_int64(val, "workinfoid", 
mapped_id);
+}
+
+static void block_share_summary(sdata_t *sdata)
+{
+	double bdiff, sdiff;
+
+	if (unlikely(!sdata->current_workbase || !sdata->current_workbase->network_diff))
+		return;
+
+	sdiff = sdata->stats.accounted_diff_shares;
+	bdiff = sdiff / sdata->current_workbase->network_diff * 100;
+	LOGWARNING("Block solved after %.0lf shares at %.1f%% diff",
+		   sdiff, bdiff);
+}
+
+static void block_solve(ckpool_t *ckp, json_t *val)
+{
+	char *msg, *workername = NULL;
+	sdata_t *sdata = ckp->sdata;
+	char cdfield[64];
+	double diff = 0;
+	int height = 0;
+	ts_t ts_now;
+
+	ts_realtime(&ts_now);
+	sprintf(cdfield, "%lu,%lu", (unsigned long)ts_now.tv_sec, (unsigned long)ts_now.tv_nsec);
+
+	json_set_string(val, "confirmed", "1");
+	json_set_string(val, "createdate", cdfield);
+	json_set_string(val, "createcode", __func__);
+	json_get_int(&height, val, "height");
+	json_get_double(&diff, val, "diff");
+	json_get_string(&workername, val, "workername");
+
+	if (!workername) {
+		ASPRINTF(&msg, "Block solved by %s!", ckp->name);
+		LOGWARNING("Solved and confirmed block!");
+	} else {
+		json_t *user_val, *worker_val;
+		worker_instance_t *worker;
+		user_instance_t *user;
+		char *s;
+
+		ASPRINTF(&msg, "Block %d solved by %s @ %s!", height, workername, ckp->name);
+		LOGWARNING("Solved and confirmed block %d by %s", height, workername);
+		user = user_by_workername(sdata, workername);
+		worker = get_worker(sdata, user, workername);
+
+		ck_rlock(&sdata->instance_lock);
+		user_val = user_stats(user);
+		worker_val = worker_stats(worker);
+		ck_runlock(&sdata->instance_lock);
+
+		s = json_dumps(user_val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER);
+		json_decref(user_val);
+		LOGWARNING("User %s:%s", user->username, s);
+		dealloc(s);
+		s = json_dumps(worker_val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER);
+		json_decref(worker_val);
+		LOGWARNING("Worker %s:%s", workername, s);
+		dealloc(s);
+	}
+	stratum_broadcast_message(sdata, msg);
+	free(msg);
+
+	free(workername);
+
+	block_share_summary(sdata);
+	reset_bestshares(sdata);
+} + +static void block_reject(json_t *val) +{ + int height = 0; + + json_get_int(&height, val, "height"); + + LOGWARNING("Submitted, but had block %d rejected", height); +} + +/* Some upstream pools (like p2pool) don't update stratum often enough and + * miners disconnect if they don't receive regular communication so send them + * a ping at regular intervals */ +static void broadcast_ping(sdata_t *sdata) +{ + json_t *json_msg; + + JSON_CPACK(json_msg, "{s:[],s:i,s:s}", + "params", + "id", 42, + "method", "mining.ping"); + + stratum_broadcast(sdata, json_msg, SM_PING); +} + +static void ckmsgq_stats(ckmsgq_t *ckmsgq, const int size, json_t **val) +{ + int64_t memsize, generated; + ckmsg_t *msg; + int objects; + + mutex_lock(ckmsgq->lock); + DL_COUNT(ckmsgq->msgs, msg, objects); + generated = ckmsgq->messages; + mutex_unlock(ckmsgq->lock); + + memsize = (sizeof(ckmsg_t) + size) * objects; + JSON_CPACK(*val, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); +} + +char *stratifier_stats(ckpool_t *ckp, void *data) +{ + json_t *val = json_object(), *subval; + int64_t memsize, generated; + sdata_t *sdata = data; + int objects; + char *buf; + + ck_rlock(&sdata->workbase_lock); + objects = HASH_COUNT(sdata->workbases); + memsize = SAFE_HASH_OVERHEAD(sdata->workbases) + sizeof(workbase_t) * objects; + generated = sdata->workbases_generated; + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "workbases", subval); + objects = HASH_COUNT(sdata->remote_workbases); + memsize = SAFE_HASH_OVERHEAD(sdata->remote_workbases) + sizeof(workbase_t) * objects; + ck_runlock(&sdata->workbase_lock); + + JSON_CPACK(subval, "{si,si}", "count", objects, "memory", memsize); + json_set_object(val, "remote_workbases", subval); + + ck_rlock(&sdata->instance_lock); + if (ckp->btcsolo) { + user_instance_t *user, *tmpuser; + int subobjects; + + objects = 0; + memsize = 0; + HASH_ITER(hh, 
sdata->user_instances, user, tmpuser) { + subobjects = HASH_COUNT(user->userwbs); + objects += subobjects; + memsize += SAFE_HASH_OVERHEAD(user->userwbs) + sizeof(struct userwb) * subobjects; + } + generated = sdata->userwbs_generated; + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "userwbs", subval); + } + + objects = HASH_COUNT(sdata->user_instances); + memsize = SAFE_HASH_OVERHEAD(sdata->user_instances) + sizeof(stratum_instance_t) * objects; + JSON_CPACK(subval, "{si,si}", "count", objects, "memory", memsize); + json_set_object(val, "users", subval); + + objects = HASH_COUNT(sdata->stratum_instances); + memsize = SAFE_HASH_OVERHEAD(sdata->stratum_instances); + generated = sdata->stratum_generated; + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "clients", subval); + + objects = sdata->stats.disconnected; + generated = sdata->disconnected_generated; + memsize = SAFE_HASH_OVERHEAD(sdata->disconnected_sessions); + memsize += sizeof(session_t) * sdata->stats.disconnected; + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "disconnected", subval); + ck_runlock(&sdata->instance_lock); + + mutex_lock(&sdata->share_lock); + generated = sdata->shares_generated; + objects = HASH_COUNT(sdata->shares); + memsize = SAFE_HASH_OVERHEAD(sdata->shares) + sizeof(share_t) * objects; + mutex_unlock(&sdata->share_lock); + + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "shares", subval); + + ck_rlock(&sdata->txn_lock); + objects = HASH_COUNT(sdata->txns); + memsize = SAFE_HASH_OVERHEAD(sdata->txns) + sizeof(txntable_t) * objects; + generated = sdata->txns_generated; + JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); + json_set_object(val, "transactions", 
subval); + ck_runlock(&sdata->txn_lock); + + ckmsgq_stats(sdata->ssends, sizeof(smsg_t), &subval); + json_set_object(val, "ssends", subval); + /* Don't know exactly how big the string is so just count the pointer for now */ + ckmsgq_stats(sdata->srecvs, sizeof(char *), &subval); + json_set_object(val, "srecvs", subval); + ckmsgq_stats(sdata->stxnq, sizeof(json_params_t), &subval); + json_set_object(val, "stxnq", subval); + + buf = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + LOGNOTICE("Stratifier stats: %s", buf); + return buf; +} + +/* Send a single client a reconnect request, setting the time we sent the + * request so we can drop the client lazily if it hasn't reconnected on its + * own more than one minute later if we call reconnect again */ +static void reconnect_client(sdata_t *sdata, stratum_instance_t *client) +{ + json_t *json_msg; + + /* Already requested? */ + if (client->reconnect_request) { + if (time(NULL) - client->reconnect_request >= 60) + connector_drop_client(sdata->ckp, client->id); + return; + } + client->reconnect_request = time(NULL); + JSON_CPACK(json_msg, "{sosss[]}", "id", json_null(), "method", "client.reconnect", + "params"); + stratum_add_send(sdata, json_msg, client->id, SM_RECONNECT); +} + +static void dead_proxy(ckpool_t *ckp, sdata_t *sdata, const char *buf) +{ + int id = 0, subid = 0; + + sscanf(buf, "deadproxy=%d:%d", &id, &subid); + dead_proxyid(sdata, id, subid, false, false); + reap_proxies(ckp, sdata); +} + +static void del_proxy(ckpool_t *ckp, sdata_t *sdata, const char *buf) +{ + int id = 0, subid = 0; + + sscanf(buf, "delproxy=%d:%d", &id, &subid); + dead_proxyid(sdata, id, subid, false, true); + reap_proxies(ckp, sdata); +} + +static void reconnect_client_id(sdata_t *sdata, const int64_t client_id) +{ + stratum_instance_t *client; + + client = ref_instance_by_id(sdata, client_id); + if (!client) { + LOGINFO("reconnect_client_id failed to find client %"PRId64, client_id); + return; + } + 
client->reconnect = true; + reconnect_client(sdata, client); + dec_instance_ref(sdata, client); +} + +/* API commands */ + +static json_t *userinfo(const user_instance_t *user) +{ + json_t *val; + + JSON_CPACK(val, "{ss,si,si,sf,sf,sf,sf,sf,sf,si}", + "user", user->username, "id", user->id, "workers", user->workers, + "bestdiff", user->best_diff, "dsps1", user->dsps1, "dsps5", user->dsps5, + "dsps60", user->dsps60, "dsps1440", user->dsps1440, "dsps10080", user->dsps10080, + "lastshare", user->last_share.tv_sec); + return val; +} + +static void getuser(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL; + char *username = NULL; + user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&username, val, "user")) { + res = json_errormsg("Failed to find user key"); + goto out; + } + if (!strlen(username)) { + res = json_errormsg("Zero length user key"); + goto out; + } + user = get_user(sdata, username); + res = userinfo(user); +out: + if (val) + json_decref(val); + free(username); + send_api_response(res, *sockd); +} + +static void userclients(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL, *client_arr; + stratum_instance_t *client; + char *username = NULL; + user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&username, val, "user")) { + res = json_errormsg("Failed to find user key"); + goto out; + } + if (!strlen(username)) { + res = json_errormsg("Zero length user key"); + goto out; + } + user = get_user(sdata, username); + client_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(user->clients, client, user_next) { + json_array_append_new(client_arr, json_integer(client->id)); + } + 
ck_runlock(&sdata->instance_lock); + + JSON_CPACK(res, "{ss,so}", "user", username, "clients", client_arr); +out: + if (val) + json_decref(val); + free(username); + send_api_response(res, *sockd); +} + +static void workerclients(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL, *client_arr; + char *tmp, *username, *workername = NULL; + stratum_instance_t *client; + user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&workername, val, "worker")) { + res = json_errormsg("Failed to find worker key"); + goto out; + } + if (!strlen(workername)) { + res = json_errormsg("Zero length worker key"); + goto out; + } + tmp = strdupa(workername); + username = strsep(&tmp, "._"); + user = get_user(sdata, username); + client_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(user->clients, client, user_next) { + if (strcmp(client->workername, workername)) + continue; + json_array_append_new(client_arr, json_integer(client->id)); + } + ck_runlock(&sdata->instance_lock); + + JSON_CPACK(res, "{ss,so}", "worker", workername, "clients", client_arr); +out: + if (val) + json_decref(val); + free(workername); + send_api_response(res, *sockd); +} + +static json_t *workerinfo(const user_instance_t *user, const worker_instance_t *worker) +{ + json_t *val; + + JSON_CPACK(val, "{ss,ss,si,sf,sf,sf,sf,si,sf,si,sb}", + "user", user->username, "worker", worker->workername, "id", user->id, + "dsps1", worker->dsps1, "dsps5", worker->dsps5, "dsps60", worker->dsps60, + "dsps1440", worker->dsps1440, "lastshare", worker->last_share.tv_sec, + "bestdiff", worker->best_diff, "mindiff", worker->mindiff, "idle", worker->idle); + return val; +} + +static void getworker(sdata_t *sdata, const char *buf, int *sockd) +{ + char *tmp, *username, *workername = NULL; + json_t *val = NULL, *res = NULL; + worker_instance_t *worker; 
+ user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&workername, val, "worker")) { + res = json_errormsg("Failed to find worker key"); + goto out; + } + if (!strlen(workername)) { + res = json_errormsg("Zero length worker key"); + goto out; + } + tmp = strdupa(workername); + username = strsep(&tmp, "._"); + user = get_user(sdata, username); + worker = get_worker(sdata, user, workername); + res = workerinfo(user, worker); +out: + if (val) + json_decref(val); + free(workername); + send_api_response(res, *sockd); +} + +static void getworkers(sdata_t *sdata, int *sockd) +{ + json_t *val = NULL, *worker_arr; + worker_instance_t *worker; + user_instance_t *user; + + worker_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + for (user = sdata->user_instances; user; user = user->hh.next) { + DL_FOREACH(user->worker_instances, worker) { + json_array_append_new(worker_arr, workerinfo(user, worker)); + } + } + ck_runlock(&sdata->instance_lock); + + JSON_CPACK(val, "{so}", "workers", worker_arr); + send_api_response(val, *sockd); +} + +static void getusers(sdata_t *sdata, int *sockd) +{ + json_t *val = NULL, *user_array; + user_instance_t *user; + + user_array = json_array(); + + ck_rlock(&sdata->instance_lock); + for (user = sdata->user_instances; user; user = user->hh.next) { + json_array_append_new(user_array, userinfo(user)); + } + ck_runlock(&sdata->instance_lock); + + JSON_CPACK(val, "{so}", "users", user_array); + send_api_response(val, *sockd); +} + +static json_t *clientinfo(const stratum_instance_t *client) +{ + json_t *val = json_object(); + + /* Too many fields for a pack object, do each discretely to keep track */ + json_set_int(val, "id", client->id); + json_set_string(val, "enonce1", client->enonce1); + json_set_string(val, "enonce1var", client->enonce1var); + json_set_int(val, "enonce1_64", client->enonce1_64); + 
json_set_double(val, "diff", client->diff); + json_set_double(val, "dsps1", client->dsps1); + json_set_double(val, "dsps5", client->dsps5); + json_set_double(val, "dsps60", client->dsps60); + json_set_double(val, "dsps1440", client->dsps1440); + json_set_double(val, "dsps10080", client->dsps10080); + json_set_int(val, "lastshare", client->last_share.tv_sec); + json_set_int(val, "starttime", client->start_time); + json_set_string(val, "address", client->address); + json_set_bool(val, "subscribed", client->subscribed); + json_set_bool(val, "authorised", client->authorised); + json_set_bool(val, "idle", client->idle); + json_set_string(val, "useragent", client->useragent ? client->useragent : ""); + json_set_string(val, "workername", client->workername ? client->workername : ""); + json_set_int(val, "userid", client->user_id); + json_set_int(val, "server", client->server); + json_set_double(val, "bestdiff", client->best_diff); + json_set_int(val, "proxyid", client->proxyid); + json_set_int(val, "subproxyid", client->subproxyid); + + return val; +} + +static void getclient(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL; + stratum_instance_t *client; + json_error_t err_val; + int64_t client_id; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_int64(&client_id, val, "id")) { + res = json_errormsg("Failed to find id key"); + goto out; + } + client = ref_instance_by_id(sdata, client_id); + if (!client) { + res = json_errormsg("Failed to find client %"PRId64, client_id); + goto out; + } + res = clientinfo(client); + + dec_instance_ref(sdata, client); +out: + if (val) + json_decref(val); + send_api_response(res, *sockd); +} + +static void getclients(sdata_t *sdata, int *sockd) +{ + json_t *val = NULL, *client_arr; + stratum_instance_t *client; + + client_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + for (client = sdata->stratum_instances; 
client; client = client->hh.next) { + json_array_append_new(client_arr, clientinfo(client)); + } + ck_runlock(&sdata->instance_lock); + + JSON_CPACK(val, "{so}", "clients", client_arr); + send_api_response(val, *sockd); +} + +static void user_clientinfo(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL, *client_arr; + stratum_instance_t *client; + char *username = NULL; + user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&username, val, "user")) { + res = json_errormsg("Failed to find user key"); + goto out; + } + if (!strlen(username)) { + res = json_errormsg("Zero length user key"); + goto out; + } + user = get_user(sdata, username); + client_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(user->clients, client, user_next) { + json_array_append_new(client_arr, clientinfo(client)); + } + ck_runlock(&sdata->instance_lock); + + JSON_CPACK(res, "{ss,so}", "user", username, "clients", client_arr); +out: + if (val) + json_decref(val); + free(username); + send_api_response(res, *sockd); +} + +static void worker_clientinfo(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL, *client_arr; + char *tmp, *username, *workername = NULL; + stratum_instance_t *client; + user_instance_t *user; + json_error_t err_val; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_string(&workername, val, "worker")) { + res = json_errormsg("Failed to find worker key"); + goto out; + } + if (!strlen(workername)) { + res = json_errormsg("Zero length worker key"); + goto out; + } + tmp = strdupa(workername); + username = strsep(&tmp, "._"); + user = get_user(sdata, username); + client_arr = json_array(); + + ck_rlock(&sdata->instance_lock); + DL_FOREACH2(user->clients, client, user_next) { + 
+		if (strcmp(client->workername, workername))
+			continue;
+		json_array_append_new(client_arr, clientinfo(client));
+	}
+	ck_runlock(&sdata->instance_lock);
+
+	JSON_CPACK(res, "{ss,so}", "worker", workername, "clients", client_arr);
+out:
+	if (val)
+		json_decref(val);
+	free(workername);
+	send_api_response(res, *sockd);
+}
+
+/* Return the user masked priority value of the proxy */
+static int proxy_prio(const proxy_t *proxy)
+{
+	/* Keep only the low 32 bits of the priority field; the mask implies
+	 * the upper bits carry other data (presumably the user id — confirm
+	 * against where priority is assigned).  NOTE(review): the implicit
+	 * conversion to int is implementation-defined if bit 31 is ever set;
+	 * verify priorities stay below INT_MAX. */
+	int prio = proxy->priority & 0x00000000ffffffff;
+
+	return prio;
+}
+
+/* Pack the full visible state of a (sub)proxy into a JSON object for the
+ * API handlers below.  Some fields (priority, combined_clients,
+ * subproxy_count) are reported from the parent proxy, the rest from the
+ * subproxy itself. */
+static json_t *json_proxyinfo(const proxy_t *proxy)
+{
+	const proxy_t *parent = proxy->parent;
+	json_t *val;
+
+	JSON_CPACK(val, "{si,si,si,sf,ss,ss,ss,ss,ss,si,si,si,si,sb,sb,sI,sI,sI,sI,sI,si,sb,sb,si}",
+		   "id", proxy->id, "subid", proxy->subid, "priority", proxy_prio(parent),
+		   "diff", proxy->diff, "baseurl", proxy->baseurl, "url", proxy->url,
+		   "auth", proxy->auth, "pass", proxy->pass,
+		   "enonce1", proxy->enonce1, "enonce1constlen", proxy->enonce1constlen,
+		   "enonce1varlen", proxy->enonce1varlen, "nonce2len", proxy->nonce2len,
+		   "enonce2varlen", proxy->enonce2varlen, "subscribed", proxy->subscribed,
+		   "notified", proxy->notified, "clients", proxy->clients, "maxclients", proxy->max_clients,
+		   "bound_clients", proxy->bound_clients, "combined_clients", parent->combined_clients,
+		   "headroom", proxy->headroom, "subproxy_count", parent->subproxy_count,
+		   "dead", proxy->dead, "global", proxy->global, "userid", proxy->userid);
+	return val;
+}
+
+/* API handler: look up a proxy by "id" (or a subproxy when "subid" is also
+ * supplied) and reply with its JSON description on sockd. */
+static void getproxy(sdata_t *sdata, const char *buf, int *sockd)
+{
+	json_t *val = NULL, *res = NULL;
+	json_error_t err_val;
+	int id, subid = 0;
+	proxy_t *proxy;
+
+	val = json_loads(buf, 0, &err_val);
+	if (unlikely(!val)) {
+		res = json_encode_errormsg(&err_val);
+		goto out;
+	}
+	if (!json_get_int(&id, val, "id")) {
+		res = json_errormsg("Failed to find id key");
+		goto out;
+	}
+	/* subid is optional; absence means the top-level proxy. */
+	json_get_int(&subid, val, "subid");
+	if (!subid)
+		proxy = existing_proxy(sdata, id);
+	else
+		proxy = existing_subproxy(sdata, id, subid);
+ if (!proxy) { + res = json_errormsg("Failed to find proxy %d:%d", id, subid); + goto out; + } + res = json_proxyinfo(proxy); +out: + if (val) + json_decref(val); + send_api_response(res, *sockd); +} + +static void proxyinfo(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL, *arr_val = json_array(); + proxy_t *proxy, *subproxy; + bool all = true; + int userid = 0; + + if (buf) { + /* See if there's a userid specified */ + val = json_loads(buf, 0, NULL); + if (json_get_int(&userid, val, "userid")) + all = false; + } + + mutex_lock(&sdata->proxy_lock); + for (proxy = sdata->proxies; proxy; proxy = proxy->hh.next) { + if (!all && proxy->userid != userid) + continue; + for (subproxy = proxy->subproxies; subproxy; subproxy = subproxy->sh.next) + json_array_append_new(arr_val, json_proxyinfo(subproxy)); + } + mutex_unlock(&sdata->proxy_lock); + + if (val) + json_decref(val); + JSON_CPACK(res, "{so}", "proxies", arr_val); + send_api_response(res, *sockd); +} + +static void setproxy(sdata_t *sdata, const char *buf, int *sockd) +{ + json_t *val = NULL, *res = NULL; + json_error_t err_val; + int id, priority; + proxy_t *proxy; + + val = json_loads(buf, 0, &err_val); + if (unlikely(!val)) { + res = json_encode_errormsg(&err_val); + goto out; + } + if (!json_get_int(&id, val, "id")) { + res = json_errormsg("Failed to find id key"); + goto out; + } + if (!json_get_int(&priority, val, "priority")) { + res = json_errormsg("Failed to find priority key"); + goto out; + } + proxy = existing_proxy(sdata, id); + if (!proxy) { + res = json_errormsg("Failed to find proxy %d", id); + goto out; + } + if (priority != proxy_prio(proxy)) + set_proxy_prio(sdata, proxy, priority); + res = json_proxyinfo(proxy); +out: + if (val) + json_decref(val); + send_api_response(res, *sockd); +} + +static void get_poolstats(sdata_t *sdata, int *sockd) +{ + pool_stats_t *stats = &sdata->stats; + json_t *val; + + mutex_lock(&sdata->stats_lock); + JSON_CPACK(val, 
"{si,si,si,si,si,sI,sf,sf,sf,sf,sI,sI,sf,sf,sf,sf,sf,sf,sf}", + "start", stats->start_time.tv_sec, "update", stats->last_update.tv_sec, + "workers", stats->workers + stats->remote_workers, "users", stats->users + stats->remote_users, + "disconnected", stats->disconnected, + "shares", stats->accounted_shares, "sps1", stats->sps1, "sps5", stats->sps5, + "sps15", stats->sps15, "sps60", stats->sps60, "accepted", stats->accounted_diff_shares, + "rejected", stats->accounted_rejects, "dsps1", stats->dsps1, "dsps5", stats->dsps5, + "dsps15", stats->dsps15, "dsps60", stats->dsps60, "dsps360", stats->dsps360, + "dsps1440", stats->dsps1440, "dsps10080", stats->dsps10080); + mutex_unlock(&sdata->stats_lock); + + send_api_response(val, *sockd); +} + +static void get_uptime(sdata_t *sdata, int *sockd) +{ + int uptime = time(NULL) - sdata->stats.start_time.tv_sec; + json_t *val; + + JSON_CPACK(val, "{si}", "uptime", uptime); + send_api_response(val, *sockd); +} + +static void stratum_loop(ckpool_t *ckp, proc_instance_t *pi) +{ + sdata_t *sdata = ckp->sdata; + unix_msg_t *umsg = NULL; + int ret = 0; + char *buf; + +retry: + if (umsg) { + Close(umsg->sockd); + free(umsg->buf); + dealloc(umsg); + } + + do { + time_t end_t; + + end_t = time(NULL); + if (end_t - sdata->update_time >= ckp->update_interval) { + sdata->update_time = end_t; + if (!ckp->proxy) { + LOGDEBUG("%ds elapsed in strat_loop, updating gbt base", + ckp->update_interval); + update_base(sdata, GEN_NORMAL); + } else if (!ckp->passthrough) { + LOGDEBUG("%ds elapsed in strat_loop, pinging miners", + ckp->update_interval); + broadcast_ping(sdata); + } + } + + umsg = get_unix_msg(pi); + } while (!umsg); + + buf = umsg->buf; + if (buf[0] == '{') { + json_t *val = json_loads(buf, JSON_DISABLE_EOF_CHECK, NULL); + + /* This is a message for a node */ + if (likely(val)) + ckmsgq_add(sdata->srecvs, val); + goto retry; + } + if (cmdmatch(buf, "ping")) { + LOGDEBUG("Stratifier received ping request"); + send_unix_msg(umsg->sockd, 
"pong"); + goto retry; + } + if (cmdmatch(buf, "stats")) { + char *msg; + + LOGDEBUG("Stratifier received stats request"); + msg = stratifier_stats(ckp, sdata); + send_unix_msg(umsg->sockd, msg); + goto retry; + } + /* Parse API commands here to return a message to sockd */ + if (cmdmatch(buf, "clients")) { + getclients(sdata, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "workers")) { + getworkers(sdata, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "users")) { + getusers(sdata, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "getclient")) { + getclient(sdata, buf + 10, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "getuser")) { + getuser(sdata, buf + 8, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "getworker")) { + getworker(sdata, buf + 10, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "userclients")) { + userclients(sdata, buf + 12, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "workerclients")) { + workerclients(sdata, buf + 14, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "getproxy")) { + getproxy(sdata, buf + 9, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "setproxy")) { + setproxy(sdata, buf + 9, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "poolstats")) { + get_poolstats(sdata, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "proxyinfo")) { + proxyinfo(sdata, buf + 10, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "ucinfo")) { + user_clientinfo(sdata, buf + 7, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf,"uptime")) { + get_uptime(sdata, &umsg->sockd); + goto retry; + } + if (cmdmatch(buf, "wcinfo")) { + worker_clientinfo(sdata, buf + 7, &umsg->sockd); + goto retry; + } + + LOGDEBUG("Stratifier received request: %s", buf); + if (cmdmatch(buf, "update")) { + update_base(sdata, GEN_PRIORITY); + } else if (cmdmatch(buf, "subscribe")) { + /* Proxifier has a new subscription */ + update_subscribe(ckp, buf); + } else if (cmdmatch(buf, "notify")) { + /* Proxifier has a new 
notify ready */ + update_notify(ckp, buf); + } else if (cmdmatch(buf, "diff")) { + update_diff(ckp, buf); + } else if (cmdmatch(buf, "dropclient")) { + int64_t client_id; + + ret = sscanf(buf, "dropclient=%"PRId64, &client_id); + if (ret < 0) + LOGDEBUG("Stratifier failed to parse dropclient command: %s", buf); + else + drop_client(ckp, sdata, client_id); + } else if (cmdmatch(buf, "reconnclient")) { + int64_t client_id; + + ret = sscanf(buf, "reconnclient=%"PRId64, &client_id); + if (ret < 0) + LOGWARNING("Stratifier failed to parse reconnclient command: %s", buf); + else + reconnect_client_id(sdata, client_id); + } else if (cmdmatch(buf, "dropall")) { + drop_allclients(ckp); + } else if (cmdmatch(buf, "reconnect")) { + request_reconnect(sdata, buf); + } else if (cmdmatch(buf, "deadproxy")) { + dead_proxy(ckp, sdata, buf); + } else if (cmdmatch(buf, "delproxy")) { + del_proxy(ckp, sdata, buf); + } else if (cmdmatch(buf, "loglevel")) { + sscanf(buf, "loglevel=%d", &ckp->loglevel); + } else if (cmdmatch(buf, "resetshares")) { + reset_bestshares(sdata); + } else + LOGWARNING("Unhandled stratifier message: %s", buf); + goto retry; +} + +static void *blockupdate(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + sdata_t *sdata = ckp->sdata; + char hash[68]; + + pthread_detach(pthread_self()); + rename_proc("blockupdate"); + + while (42) { + int ret; + + ret = generator_getbest(ckp, hash); + switch (ret) { + case GETBEST_NOTIFY: + cksleep_ms(5000); + break; + case GETBEST_SUCCESS: + if (strcmp(hash, sdata->lastswaphash)) { + update_base(sdata, GEN_PRIORITY); + break; + } + [[fallthrough]]; + case GETBEST_FAILED: + default: + cksleep_ms(ckp->blockpoll); + } + } + return NULL; +} + +/* Enter holding workbase_lock and client a ref count. 
+ */
+static void __fill_enonce1data(const workbase_t *wb, stratum_instance_t *client)
+{
+	/* Constant prefix of the extranonce1 comes from the workbase; the
+	 * variable suffix comes from the client's own enonce1_64 counter.
+	 * Both the binary and hex representations are refreshed here. */
+	if (wb->enonce1constlen)
+		memcpy(client->enonce1bin, wb->enonce1constbin, wb->enonce1constlen);
+	if (wb->enonce1varlen) {
+		memcpy(client->enonce1bin + wb->enonce1constlen, &client->enonce1_64, wb->enonce1varlen);
+		__bin2hex(client->enonce1var, &client->enonce1_64, wb->enonce1varlen);
+	}
+	__bin2hex(client->enonce1, client->enonce1bin, wb->enonce1constlen + wb->enonce1varlen);
+}
+
+/* Create a new enonce1 from the 64 bit enonce1_64 value, using only the number
+ * of bytes we have to work with when we are proxying with a split nonce2.
+ * When the proxy space is less than 32 bits to work with, we look for an
+ * unused enonce1 value and reject clients instead if there is no space left.
+ * Needs to be entered with client holding a ref count. */
+static bool new_enonce1(ckpool_t *ckp, sdata_t *ckp_sdata, sdata_t *sdata, stratum_instance_t *client)
+{
+	proxy_t *proxy = NULL;
+	uint64_t enonce1;
+
+	if (ckp->proxy) {
+		if (!ckp_sdata->proxy)
+			return false;
+
+		mutex_lock(&ckp_sdata->proxy_lock);
+		proxy = sdata->subproxy;
+		client->proxyid = proxy->id;
+		client->subproxyid = proxy->subid;
+		mutex_unlock(&ckp_sdata->proxy_lock);
+
+		/* NOTE(review): clients/max_clients are read after proxy_lock is
+		 * dropped, so this limit check can race with concurrent
+		 * connects; presumably acceptable as a soft cap — confirm. */
+		if (proxy->clients >= proxy->max_clients) {
+			LOGWARNING("Proxy reached max clients %"PRId64, proxy->max_clients);
+			return false;
+		}
+	}
+
+	/* Still initialising */
+	if (unlikely(!sdata->current_workbase))
+		return false;
+
+	/* instance_lock protects enonce1_64. Incrementing a little endian 64bit
+	 * number ensures that no matter how many of the bits we take from the
+	 * left depending on nonce2 length, we'll always get a changing value
+	 * for every next client.*/
+	ck_wlock(&ckp_sdata->instance_lock);
+	enonce1 = le64toh(ckp_sdata->enonce1_64);
+	enonce1++;
+	client->enonce1_64 = ckp_sdata->enonce1_64 = htole64(enonce1);
+	if (proxy) {
+		/* Account the client against its subproxy and the parent's
+		 * combined total under the same lock as the counter bump. */
+		client->proxy = proxy;
+		proxy->clients++;
+		proxy->bound_clients++;
+		proxy->parent->combined_clients++;
+	}
+	ck_wunlock(&ckp_sdata->instance_lock);
+
+	ck_rlock(&sdata->workbase_lock);
+	__fill_enonce1data(sdata->current_workbase, client);
+	ck_runlock(&sdata->workbase_lock);
+
+	return true;
+}
+
+static void stratum_send_message(sdata_t *sdata, const stratum_instance_t *client, const char *msg);
+
+/* Need to hold sdata->proxy_lock */
+static proxy_t *__best_subproxy(proxy_t *proxy)
+{
+	proxy_t *subproxy, *best = NULL, *tmp;
+	int64_t max_headroom;
+
+	proxy->headroom = max_headroom = 0;
+	HASH_ITER(sh, proxy->subproxies, subproxy, tmp) {
+		int64_t subproxy_headroom;
+
+		if (subproxy->dead)
+			continue;
+		if (!subproxy->sdata->current_workbase)
+			continue;
+		subproxy_headroom = subproxy->max_clients - subproxy->clients;
+
+		proxy->headroom += subproxy_headroom;
+		if (subproxy_headroom > max_headroom) {
+			best = subproxy;
+			max_headroom = subproxy_headroom;
+		}
+		/* NOTE(review): breaking as soon as best is non-NULL means the
+		 * first live subproxy with positive headroom wins (not the one
+		 * with most headroom) and proxy->headroom is left a partial
+		 * sum over the subproxies visited so far — confirm this
+		 * early exit is intentional. */
+		if (best)
+			break;
+	}
+	return best;
+}
+
+/* Choose the stratifier data for a new client. Use the main ckp_sdata except
+ * in proxy mode where we find a subproxy based on the current proxy with room
+ * for more clients. Signal the generator to recruit more subproxies if we are
+ * running out of room.
*/ +static sdata_t *select_sdata(ckpool_t *ckp, sdata_t *ckp_sdata, const int userid) +{ + proxy_t *global, *proxy, *tmp, *best = NULL; + + if (!ckp->proxy || ckp->passthrough) + return ckp_sdata; + + /* Proxies are ordered by priority so first available will be the best + * priority */ + mutex_lock(&ckp_sdata->proxy_lock); + best = global = ckp_sdata->proxy; + + HASH_ITER(hh, ckp_sdata->proxies, proxy, tmp) { + if (proxy->userid < userid) + continue; + if (proxy->userid > userid) + break; + best = __best_subproxy(proxy); + if (best) + break; + } + mutex_unlock(&ckp_sdata->proxy_lock); + + if (!best) { + if (!userid) + LOGWARNING("Temporarily insufficient proxies to accept more clients"); + else + LOGNOTICE("Temporarily insufficient proxies for userid %d to accept more clients", userid); + return NULL; + } + if (!userid) { + if (best->id != global->id || current_headroom(ckp_sdata, &proxy) < 2) + generator_recruit(ckp, global->id, 1); + } else { + if (best_userproxy_headroom(ckp_sdata, userid) < 2) + generator_recruit(ckp, best->id, 1); + } + return best->sdata; +} + +static int int_from_sessionid(const char *sessionid) +{ + int ret = 0, slen; + + if (!sessionid) + goto out; + slen = strlen(sessionid) / 2; + if (slen < 1 || slen > 4) + goto out; + + if (!validhex(sessionid)) + goto out; + + sscanf(sessionid, "%x", &ret); +out: + return ret; +} + +static int userid_from_sessionid(sdata_t *sdata, const int session_id) +{ + session_t *session; + int ret = -1; + + ck_wlock(&sdata->instance_lock); + HASH_FIND_INT(sdata->disconnected_sessions, &session_id, session); + if (!session) + goto out_unlock; + HASH_DEL(sdata->disconnected_sessions, session); + sdata->stats.disconnected--; + ret = session->userid; + dealloc(session); +out_unlock: + ck_wunlock(&sdata->instance_lock); + + if (ret != -1) + LOGINFO("Found old session id %d for userid %d", session_id, ret); + return ret; +} + +static int userid_from_sessionip(sdata_t *sdata, const char *address) +{ + session_t 
*session, *tmp; + int ret = -1; + + ck_wlock(&sdata->instance_lock); + HASH_ITER(hh, sdata->disconnected_sessions, session, tmp) { + if (!strcmp(session->address, address)) { + ret = session->userid; + break; + } + } + if (ret == -1) + goto out_unlock; + HASH_DEL(sdata->disconnected_sessions, session); + sdata->stats.disconnected--; + dealloc(session); +out_unlock: + ck_wunlock(&sdata->instance_lock); + + if (ret != -1) + LOGINFO("Found old session address %s for userid %d", address, ret); + return ret; +} + +/* Extranonce1 must be set here. Needs to be entered with client holding a ref + * count. */ +static json_t *parse_subscribe(stratum_instance_t *client, const int64_t client_id, const json_t *params_val) +{ + ckpool_t *ckp = client->ckp; + sdata_t *sdata, *ckp_sdata = ckp->sdata; + int session_id = 0, userid = -1; + bool old_match = false; + char sessionid[12]; + int arr_size; + json_t *ret; + int n2len; + + if (unlikely(!json_is_array(params_val))) { + stratum_send_message(ckp_sdata, client, "Invalid json: params not an array"); + return json_string("params not an array"); + } + + sdata = select_sdata(ckp, ckp_sdata, 0); + if (unlikely(!ckp->node && (!sdata || !sdata->current_workbase))) { + LOGWARNING("Failed to provide subscription due to no %s", sdata ? "current workbase" : "sdata"); + stratum_send_message(ckp_sdata, client, "Pool Initialising"); + return json_string("Initialising"); + } + + arr_size = json_array_size(params_val); + /* NOTE useragent is NULL prior to this so should not be used in code + * till after this point */ + if (arr_size > 0) { + const char *buf; + + buf = json_string_value(json_array_get(params_val, 0)); + if (buf && strlen(buf)) + client->useragent = strdup(buf); + else + client->useragent = ckzalloc(1); // Set to "" + if (arr_size > 1) { + /* This would be the session id for reconnect, it will + * not work for clients on a proxied connection. 
*/ + buf = json_string_value(json_array_get(params_val, 1)); + session_id = int_from_sessionid(buf); + LOGDEBUG("Found old session id %d", session_id); + } + if (!ckp->proxy && session_id && !subclient(client_id)) { + if ((client->enonce1_64 = disconnected_sessionid_exists(sdata, session_id, client_id))) { + sprintf(client->enonce1, "%016lx", client->enonce1_64); + old_match = true; + + ck_rlock(&ckp_sdata->workbase_lock); + __fill_enonce1data(sdata->current_workbase, client); + ck_runlock(&ckp_sdata->workbase_lock); + } + } + } else + client->useragent = ckzalloc(1); + + /* Whitelist cgminer based clients to receive stratum messages */ + if (strcasestr(client->useragent, "gminer")) + client->messages = true; + + /* We got what we needed */ + if (ckp->node) + return NULL; + + if (ckp->proxy) { + /* Use the session_id to tell us which user this was. + * If it's not available, see if there's an IP address + * which matches a recently disconnected session. */ + if (session_id) + userid = userid_from_sessionid(ckp_sdata, session_id); + if (userid == -1) + userid = userid_from_sessionip(ckp_sdata, client->address); + if (userid != -1) { + sdata_t *user_sdata = select_sdata(ckp, ckp_sdata, userid); + + if (user_sdata) + sdata = user_sdata; + } + } + + client->sdata = sdata; + if (ckp->proxy) { + LOGINFO("Current %d, selecting proxy %d:%d for client %s", ckp_sdata->proxy->id, + sdata->subproxy->id, sdata->subproxy->subid, client->identity); + } + + if (!old_match) { + /* Create a new extranonce1 based on a uint64_t pointer */ + if (!new_enonce1(ckp, ckp_sdata, sdata, client)) { + stratum_send_message(sdata, client, "Pool full of clients"); + client->reject = 3; + return json_string("proxy full"); + } + LOGINFO("Set new subscription %s to new enonce1 %lx string %s", client->identity, + client->enonce1_64, client->enonce1); + } else { + LOGINFO("Set new subscription %s to old matched enonce1 %lx string %s", + client->identity, client->enonce1_64, client->enonce1); + } + + 
/* Workbases will exist if sdata->current_workbase is not NULL */ + ck_rlock(&sdata->workbase_lock); + n2len = sdata->workbases->enonce2varlen; + sprintf(sessionid, "%08x", client->session_id); + JSON_CPACK(ret, "[[[s,s]],s,i]", "mining.notify", sessionid, client->enonce1, + n2len); + ck_runlock(&sdata->workbase_lock); + + client->subscribed = true; + + return ret; +} + +static double dsps_from_key(json_t *val, const char *key) +{ + char *string, *endptr; + double ret = 0; + + json_get_string(&string, val, key); + if (!string) + return ret; + ret = strtod(string, &endptr) / nonces; + if (endptr) { + switch (endptr[0]) { + case 'E': + ret *= (double)1000; + [[fallthrough]]; + case 'P': + ret *= (double)1000; + [[fallthrough]]; + case 'T': + ret *= (double)1000; + [[fallthrough]]; + case 'G': + ret *= (double)1000; + [[fallthrough]]; + case 'M': + ret *= (double)1000; + [[fallthrough]]; + case 'K': + ret *= (double)1000; + [[fallthrough]]; + default: + break; + } + } + free(string); + return ret; +} + +static void decay_client(stratum_instance_t *client, double diff, tv_t *now_t) +{ + double tdiff = sane_tdiff(now_t, &client->last_decay); + + /* If we're calling the hashmeter too frequently we'll just end up + * racing and having inappropriate values, so store up diff and update + * at most 20 times per second. 
Use an integer for uadiff to make the + * update atomic */ + if (tdiff < 0.05) { + client->uadiff += diff; + return; + } + copy_tv(&client->last_decay, now_t); + diff += client->uadiff; + client->uadiff = 0; + decay_time(&client->dsps1, diff, tdiff, MIN1); + decay_time(&client->dsps5, diff, tdiff, MIN5); + decay_time(&client->dsps60, diff, tdiff, HOUR); + decay_time(&client->dsps1440, diff, tdiff, DAY); + decay_time(&client->dsps10080, diff, tdiff, WEEK); +} + +static void decay_worker(worker_instance_t *worker, double diff, tv_t *now_t) +{ + double tdiff = sane_tdiff(now_t, &worker->last_decay); + + if (tdiff < 0.05) { + worker->uadiff += diff; + return; + } + copy_tv(&worker->last_decay, now_t); + diff += worker->uadiff; + worker->uadiff = 0; + decay_time(&worker->dsps1, diff, tdiff, MIN1); + decay_time(&worker->dsps5, diff, tdiff, MIN5); + decay_time(&worker->dsps60, diff, tdiff, HOUR); + decay_time(&worker->dsps1440, diff, tdiff, DAY); + decay_time(&worker->dsps10080, diff, tdiff, WEEK); +} + +static void decay_user(user_instance_t *user, double diff, tv_t *now_t) +{ + double tdiff = sane_tdiff(now_t, &user->last_decay); + + if (tdiff < 0.05) { + user->uadiff += diff; + return; + } + copy_tv(&user->last_decay, now_t); + diff += user->uadiff; + user->uadiff = 0; + decay_time(&user->dsps1, diff, tdiff, MIN1); + decay_time(&user->dsps5, diff, tdiff, MIN5); + decay_time(&user->dsps60, diff, tdiff, HOUR); + decay_time(&user->dsps1440, diff, tdiff, DAY); + decay_time(&user->dsps10080, diff, tdiff, WEEK); +} + +static user_instance_t *get_create_user(sdata_t *sdata, const char *username, bool *new_user); +static worker_instance_t *get_create_worker(sdata_t *sdata, user_instance_t *user, + const char *workername, bool *new_worker); + +/* Load the statistics of and create all known users at startup */ +static void read_userstats(ckpool_t *ckp, sdata_t *sdata, int tvsec_diff) +{ + char dnam[256], s[4096], *username, *buf; + int ret, users = 0, workers = 0; + 
user_instance_t *user; + struct dirent *dir; + struct stat fdbuf; + bool new_user; + json_t *val; + FILE *fp; + tv_t now; + DIR *d; + int fd; + + snprintf(dnam, 255, "%susers", ckp->logdir); + d = opendir(dnam); + if (!d) { + LOGNOTICE("No user directory found"); + return; + } + + tv_time(&now); + + while ((dir = readdir(d)) != NULL) { + json_t *worker_array, *arr_val; + int64_t authorised; + int lastshare; + size_t index; + + username = basename(dir->d_name); + if (!strcmp(username, "/") || !strcmp(username, ".") || !strcmp(username, "..")) + continue; + + new_user = false; + user = get_create_user(sdata, username, &new_user); + if (unlikely(!new_user)) { + /* All users should be new at this stage */ + LOGWARNING("Duplicate user in read_userstats %s", username); + continue; + } + users++; + snprintf(s, 4095, "%s/%s", dnam, username); + fp = fopen(s, "re"); + if (unlikely(!fp)) { + /* Permission problems should be the only reason this happens */ + LOGWARNING("Failed to load user %s logfile to read", username); + continue; + } + fd = fileno(fp); + if (unlikely(fstat(fd, &fdbuf))) { + LOGERR("Failed to fstat user %s logfile", username); + fclose(fp); + continue; + } + /* We don't know how big the logfile will be so allocate + * according to file size */ + buf = ckzalloc(fdbuf.st_size + 1); + ret = fread(buf, 1, fdbuf.st_size, fp); + fclose(fp); + if (ret < 1) { + LOGNOTICE("Failed to read user %s logfile", username); + dealloc(buf); + continue; + } + val = json_loads(buf, 0, NULL); + if (!val) { + LOGNOTICE("Failed to json decode user %s logfile: %s", username, buf); + dealloc(buf); + continue; + } + dealloc(buf); + + copy_tv(&user->last_share, &now); + copy_tv(&user->last_decay, &now); + user->dsps1 = dsps_from_key(val, "hashrate1m"); + user->dsps5 = dsps_from_key(val, "hashrate5m"); + user->dsps60 = dsps_from_key(val, "hashrate1hr"); + user->dsps1440 = dsps_from_key(val, "hashrate1d"); + user->dsps10080 = dsps_from_key(val, "hashrate7d"); + json_get_int(&lastshare, 
val, "lastshare"); + user->last_share.tv_sec = lastshare; + json_get_int64(&user->shares, val, "shares"); + json_get_double(&user->best_diff, val, "bestshare"); + json_get_int64(&user->best_ever, val, "bestever"); + json_get_int64(&authorised, val, "authorised"); + user->auth_time = authorised; + if (user->best_diff > user->best_ever) + user->best_ever = user->best_diff; + LOGINFO("Successfully read user %s stats %f %f %f %f %f %f %ld %ld", user->username, + user->dsps1, user->dsps5, user->dsps60, user->dsps1440, + user->dsps10080, user->best_diff, user->best_ever, user->auth_time); + if (tvsec_diff > 60) + decay_user(user, 0, &now); + + worker_array = json_object_get(val, "worker"); + json_array_foreach(worker_array, index, arr_val) { + const char *workername = json_string_value(json_object_get(arr_val, "workername")); + worker_instance_t *worker; + bool new_worker = false; + + if (unlikely(!workername || !strlen(workername)) || + !strstr(workername, username)) { + LOGWARNING("Invalid workername in read_userstats %s", workername); + continue; + } + worker = get_create_worker(sdata, user, workername, &new_worker); + if (unlikely(!new_worker)) { + LOGWARNING("Duplicate worker in read_userstats %s", workername); + continue; + } + workers++; + copy_tv(&worker->last_decay, &now); + worker->dsps1 = dsps_from_key(arr_val, "hashrate1m"); + worker->dsps5 = dsps_from_key(arr_val, "hashrate5m"); + worker->dsps60 = dsps_from_key(arr_val, "hashrate1hr"); + worker->dsps1440 = dsps_from_key(arr_val, "hashrate1d"); + worker->dsps10080 = dsps_from_key(arr_val, "hashrate7d"); + json_get_int(&lastshare, arr_val, "lastshare"); + worker->last_share.tv_sec = lastshare; + json_get_double(&worker->best_diff, arr_val, "bestshare"); + json_get_int64(&worker->best_ever, arr_val, "bestever"); + if (worker->best_diff > worker->best_ever) + worker->best_ever = worker->best_diff; + json_get_int64(&worker->shares, arr_val, "shares"); + LOGINFO("Successfully read worker %s stats %f %f %f %f %f 
%ld", worker->workername, + worker->dsps1, worker->dsps5, worker->dsps60, worker->dsps1440, worker->best_diff, worker->best_ever); + if (tvsec_diff > 60) + decay_worker(worker, 0, &now); + } + json_decref(val); + } + closedir(d); + + if (likely(users)) + LOGWARNING("Loaded %d users and %d workers", users, workers); +} + +#define DEFAULT_AUTH_BACKOFF (3) /* Set initial backoff to 3 seconds */ + +static user_instance_t *__create_user(sdata_t *sdata, const char *username) +{ + user_instance_t *user = ckzalloc(sizeof(user_instance_t)); + + user->auth_backoff = DEFAULT_AUTH_BACKOFF; + strcpy(user->username, username); + user->id = ++sdata->user_instance_id; + HASH_ADD_STR(sdata->user_instances, username, user); + return user; +} + + +/* Find user by username or create one if it doesn't already exist */ +static user_instance_t *get_create_user(sdata_t *sdata, const char *username, bool *new_user) +{ + user_instance_t *user; + + ck_wlock(&sdata->instance_lock); + HASH_FIND_STR(sdata->user_instances, username, user); + if (unlikely(!user)) { + user = __create_user(sdata, username); + *new_user = true; + } + ck_wunlock(&sdata->instance_lock); + + return user; +} + +static user_instance_t *get_user(sdata_t *sdata, const char *username) +{ + bool dummy = false; + + return get_create_user(sdata, username, &dummy); +} + +static worker_instance_t *__create_worker(user_instance_t *user, const char *workername) +{ + worker_instance_t *worker = ckzalloc(sizeof(worker_instance_t)); + + worker->workername = strdup(workername); + worker->user_instance = user; + DL_APPEND(user->worker_instances, worker); + worker->start_time = time(NULL); + return worker; +} + +static worker_instance_t *__get_worker(user_instance_t *user, const char *workername) +{ + worker_instance_t *worker = NULL, *tmp; + + DL_FOREACH(user->worker_instances, tmp) { + if (!safecmp(workername, tmp->workername)) { + worker = tmp; + break; + } + } + return worker; +} + +/* Find worker amongst a user's workers by 
workername or create one if it + * doesn't yet exist. */ +static worker_instance_t *get_create_worker(sdata_t *sdata, user_instance_t *user, + const char *workername, bool *new_worker) +{ + worker_instance_t *worker; + + ck_wlock(&sdata->instance_lock); + worker = __get_worker(user, workername); + if (!worker) { + worker = __create_worker(user, workername); + *new_worker = true; + } + ck_wunlock(&sdata->instance_lock); + + return worker; +} + +static worker_instance_t *get_worker(sdata_t *sdata, user_instance_t *user, const char *workername) +{ + bool dummy = false; + + return get_create_worker(sdata, user, workername, &dummy); +} + +/* This simply strips off the first part of the workername and matches it to a + * user or creates a new one. Needs to be entered with client holding a ref + * count. */ +static user_instance_t *generate_user(ckpool_t *ckp, stratum_instance_t *client, + const char *workername) +{ + char *base_username = strdupa(workername), *username; + bool new_user = false, new_worker = false; + sdata_t *sdata = ckp->sdata; + worker_instance_t *worker; + user_instance_t *user; + int len; + + username = strsep(&base_username, "._"); + if (!username || !strlen(username)) + username = base_username; + len = strlen(username); + if (unlikely(len > 127)) + username[127] = '\0'; + + user = get_create_user(sdata, username, &new_user); + worker = get_create_worker(sdata, user, workername, &new_worker); + + /* Create one worker instance for combined data from workers of the + * same name */ + ck_wlock(&sdata->instance_lock); + client->user_instance = user; + client->worker_instance = worker; + DL_APPEND2(user->clients, client, user_prev, user_next); + __inc_worker(sdata,user, worker); + ck_wunlock(&sdata->instance_lock); + + if (!ckp->proxy && (new_user || !user->btcaddress)) { + /* Is this a btc address based username? 
*/ + if (generator_checkaddr(ckp, username, &user->script, &user->segwit)) { + user->btcaddress = true; + user->txnlen = address_to_txn(user->txnbin, username, user->script, user->segwit); + } + } + if (new_user) { + LOGNOTICE("Added new user %s%s", username, user->btcaddress ? + " as address based registration" : ""); + } + + return user; +} + +static void check_global_user(ckpool_t *ckp, user_instance_t *user, stratum_instance_t *client) +{ + sdata_t *sdata = ckp->sdata; + proxy_t *proxy = best_proxy(sdata); + int proxyid = proxy->id; + char buf[256]; + + sprintf(buf, "globaluser=%d:%d:%"PRId64":%s,%s", proxyid, user->id, client->id, + user->username, client->password); + send_proc(ckp->generator,buf); +} + +/* Manage the response to auth, client must hold ref */ +static void client_auth(ckpool_t *ckp, stratum_instance_t *client, user_instance_t *user, + const bool ret) +{ + if (ret) { + client->authorised = ret; + user->authorised = ret; + if (ckp->proxy) { + LOGNOTICE("Authorised client %s to proxy %d:%d, worker %s as user %s", + client->identity, client->proxyid, client->subproxyid, + client->workername, user->username); + if (ckp->userproxy) + check_global_user(ckp, user, client); + } else { + LOGNOTICE("Authorised client %s %s worker %s as user %s", + client->identity, client->address, client->workername, + user->username); + } + user->failed_authtime = 0; + user->auth_backoff = DEFAULT_AUTH_BACKOFF; /* Reset auth backoff time */ + user->throttled = false; + if (!user->auth_time) + user->auth_time = time(NULL); + } else { + if (user->throttled) { + LOGINFO("Client %s %s worker %s failed to authorise as throttled user %s", + client->identity, client->address, client->workername, + user->username); + } else { + LOGNOTICE("Client %s %s worker %s failed to authorise as user %s", + client->identity, client->address, client->workername, + user->username); + } + user->failed_authtime = time(NULL); + user->auth_backoff <<= 1; + /* Cap backoff time to 10 mins */ + if 
(user->auth_backoff > 600) + user->auth_backoff = 600; + client->reject = 3; + } + /* We can set this outside of lock safely */ + client->authorising = false; +} + +static json_t *__user_notify(const workbase_t *wb, const user_instance_t *user, const bool clean); + +static void update_solo_client(sdata_t *sdata, workbase_t *wb, const int64_t client_id, + user_instance_t *user_instance) +{ + json_t *json_msg = __user_notify(wb, user_instance, true); + + stratum_add_send(sdata, json_msg, client_id, SM_UPDATE); +} + +/* Needs to be entered with client holding a ref count. */ +static json_t *parse_authorise(stratum_instance_t *client, const json_t *params_val, + json_t **err_val) +{ + user_instance_t *user; + ckpool_t *ckp = client->ckp; + const char *buf, *pass; + bool ret = false; + int arr_size; + ts_t now; + + if (unlikely(!json_is_array(params_val))) { + *err_val = json_string("params not an array"); + goto out; + } + arr_size = json_array_size(params_val); + if (unlikely(arr_size < 1)) { + *err_val = json_string("params missing array entries"); + goto out; + } + if (unlikely(!client->useragent)) { + *err_val = json_string("Failed subscription"); + goto out; + } + buf = json_string_value(json_array_get(params_val, 0)); + if (!buf) { + *err_val = json_string("Invalid workername parameter"); + goto out; + } + if (!strlen(buf)) { + *err_val = json_string("Empty workername parameter"); + goto out; + } + if (!memcmp(buf, ".", 1) || !memcmp(buf, "_", 1)) { + *err_val = json_string("Empty username parameter"); + goto out; + } + if (strchr(buf, '/')) { + *err_val = json_string("Invalid character in username"); + goto out; + } + pass = json_string_value(json_array_get(params_val, 1)); + user = generate_user(ckp, client, buf); + client->user_id = user->id; + ts_realtime(&now); + client->start_time = now.tv_sec; + /* NOTE workername is NULL prior to this so should not be used in code + * till after this point */ + client->workername = strdup(buf); + if (pass) + 
client->password = strndup(pass, 64); + else + client->password = strdup(""); + if (user->failed_authtime) { + time_t now_t = time(NULL); + + if (now_t < user->failed_authtime + user->auth_backoff) { + if (!user->throttled) { + user->throttled = true; + LOGNOTICE("Client %s %s worker %s rate limited due to failed auth attempts", + client->identity, client->address, buf); + } else{ + LOGINFO("Client %s %s worker %s rate limited due to failed auth attempts", + client->identity, client->address, buf); + } + client->dropped = true; + goto out; + } + } + if (!ckp->btcsolo || client->user_instance->btcaddress) + ret = true; + + /* We do the preauth etc. in remote mode, and leave final auth to + * upstream pool to complete. */ + if (!ckp->remote || ckp->btcsolo) + client_auth(ckp, client, user, ret); +out: + if (ckp->btcsolo && ret && !client->remote) { + sdata_t *sdata = ckp->sdata; + workbase_t *wb; + + /* To avoid grabbing recursive lock */ + ck_wlock(&sdata->workbase_lock); + wb = sdata->current_workbase; + wb->readcount++; + ck_wunlock(&sdata->workbase_lock); + + ck_wlock(&sdata->instance_lock); + __generate_userwb(sdata, wb, user); + ck_wunlock(&sdata->instance_lock); + + update_solo_client(sdata, wb, client->id, user); + + ck_wlock(&sdata->workbase_lock); + wb->readcount--; + ck_wunlock(&sdata->workbase_lock); + + stratum_send_diff(sdata, client); + } + return json_boolean(ret); +} + +/* Needs to be entered with client holding a ref count. */ +static void stratum_send_diff(sdata_t *sdata, const stratum_instance_t *client) +{ + json_t *json_msg; + + JSON_CPACK(json_msg, "{s[I]soss}", "params", client->diff, "id", json_null(), + "method", "mining.set_difficulty"); + stratum_add_send(sdata, json_msg, client->id, SM_DIFF); +} + +/* Needs to be entered with client holding a ref count. 
 */
static void stratum_send_message(sdata_t *sdata, const stratum_instance_t *client, const char *msg)
{
	json_t *json_msg;

	/* Only send messages to whitelisted clients */
	if (!client->messages)
		return;
	JSON_CPACK(json_msg, "{sosss[s]}", "id", json_null(), "method", "client.show_message",
		   "params", msg);
	stratum_add_send(sdata, json_msg, client->id, SM_MSG);
}

/* Exponential weighting factor in [0,1) for how long a client has been
 * submitting relative to the decay period: approaches 1 as tdiff >> period. */
static double time_bias(const double tdiff, const double period)
{
	double dexp = tdiff / period;

	/* Sanity check to prevent silly numbers for double accuracy */
	if (unlikely(dexp > 36))
		dexp = 36;
	return 1.0 - 1.0 / exp(dexp);
}

/* Account for a submitted share in the pool/user/worker/client statistics and
 * perform vardiff adjustment for the client. Needs to be entered with client
 * holding a ref count. */
static void add_submit(ckpool_t *ckp, stratum_instance_t *client, const double diff, const bool valid,
		       const bool submit)
{
	sdata_t *ckp_sdata = ckp->sdata, *sdata = client->sdata;
	worker_instance_t *worker = client->worker_instance;
	double tdiff, bdiff, dsps, drr, network_diff, bias;
	user_instance_t *user = client->user_instance;
	int64_t next_blockid, optimal, mindiff;
	tv_t now_t;

	/* Fold this share into the as-yet-unaccounted pool-wide tallies */
	mutex_lock(&ckp_sdata->uastats_lock);
	if (valid) {
		ckp_sdata->stats.unaccounted_shares++;
		ckp_sdata->stats.unaccounted_diff_shares += diff;
	} else
		ckp_sdata->stats.unaccounted_rejects += diff;
	mutex_unlock(&ckp_sdata->uastats_lock);

	/* Count only accepted and stale rejects in diff calculation. */
	if (valid) {
		worker->shares += diff;
		user->shares += diff;
	} else if (!submit)
		return;

	tv_time(&now_t);

	ck_rlock(&sdata->workbase_lock);
	next_blockid = sdata->workbase_id + 1;
	if (ckp->proxy)
		network_diff = sdata->current_workbase->diff;
	else
		network_diff = sdata->current_workbase->network_diff;
	ck_runlock(&sdata->workbase_lock);

	/* First share ever from this client: seed the timestamps */
	if (unlikely(!client->first_share.tv_sec)) {
		copy_tv(&client->first_share, &now_t);
		copy_tv(&client->ldc, &now_t);
	}

	/* Update the decaying diff-per-second averages at each level */
	decay_client(client, diff, &now_t);
	copy_tv(&client->last_share, &now_t);

	decay_worker(worker, diff, &now_t);
	copy_tv(&worker->last_share, &now_t);
	worker->idle = false;

	decay_user(user, diff, &now_t);
	copy_tv(&user->last_share, &now_t);
	client->idle = false;

	/* Once we've updated user/client statistics in node mode, we can't
	 * alter diff ourselves. */
	if (ckp->node)
		return;

	client->ssdc++;
	bdiff = sane_tdiff(&now_t, &client->first_share);
	bias = time_bias(bdiff, 300);
	tdiff = sane_tdiff(&now_t, &client->ldc);

	/* Check the difficulty every 240 seconds or as many shares as we
	 * should have had in that time, whichever comes first. */
	if (client->ssdc < 72 && tdiff < 240)
		return;

	/* A diff change is already pending; restart the share count */
	if (diff != client->diff) {
		client->ssdc = 0;
		return;
	}

	/* Diff rate ratio */
	dsps = client->dsps5 / bias;
	drr = dsps / (double)client->diff;

	/* Optimal rate product is 0.3, allow some hysteresis. */
	if (drr > 0.15 && drr < 0.4)
		return;

	/* Client suggest diff overrides worker mindiff */
	if (client->suggest_diff)
		mindiff = client->suggest_diff;
	else
		mindiff = worker->mindiff;
	/* Allow slightly lower diffs when users choose their own mindiff */
	if (mindiff) {
		if (drr < 0.5)
			return;
		optimal = lround(dsps * 2.4);
	} else
		optimal = lround(dsps * 3.33);

	/* Clamp to mindiff ~ network_diff */

	/* Set to higher of pool mindiff and optimal */
	optimal = MAX(optimal, ckp->mindiff);

	/* Set to higher of optimal and user chosen diff */
	optimal = MAX(optimal, mindiff);

	/* Set to lower of optimal and pool maxdiff */
	if (ckp->maxdiff)
		optimal = MIN(optimal, ckp->maxdiff);

	/* Set to lower of optimal and network_diff */
	optimal = MIN(optimal, network_diff);

	if (unlikely(optimal < 1))
		return;

	if (client->diff == optimal)
		return;

	/* If this is the first share in a change, reset the last diff change
	 * to make sure the client hasn't just fallen back after a leave of
	 * absence */
	if (optimal < client->diff && client->ssdc == 1) {
		copy_tv(&client->ldc, &now_t);
		return;
	}

	client->ssdc = 0;

	LOGINFO("Client %s biased dsps %.2f dsps %.2f drr %.2f adjust diff from %"PRId64" to: %"PRId64" ",
		client->identity, dsps, client->dsps5, drr, client->diff, optimal);

	/* Record the old diff so shares at the previous difficulty are still
	 * accepted until the client sees the new job id */
	copy_tv(&client->ldc, &now_t);
	client->diff_change_job_id = next_blockid;
	client->old_diff = client->diff;
	client->diff = optimal;
	stratum_send_diff(sdata, client);
}

/* Relay a solved block to all downstream trusted remote servers */
static void
downstream_block(ckpool_t *ckp, sdata_t *sdata, const json_t *val, const int cblen,
		 const char *coinbase, const uchar *data)
{
	json_t *block_val = json_deep_copy(val);

	/* Strip unnecessary fields and add extra fields needed */
	json_set_string(block_val, "method", stratum_msgs[SM_BLOCK]);
	add_remote_blockdata(ckp, block_val, cblen, coinbase, data);
	downstream_json(sdata, block_val, 0, SSEND_PREPEND);
	json_decref(block_val);
}

/*
 We should already be holding a wb readcount. Needs to be entered with
 * client holding a ref count.
 * Checks whether a share's hash meets the network target and, if so, builds
 * and submits the full block locally and to any remote/node peers. */
static void
test_blocksolve(const stratum_instance_t *client, const workbase_t *wb, const uchar *data,
		const uchar *hash, const double diff, const char *coinbase, int cblen,
		const char *nonce2, const char *nonce, const uint32_t ntime32, const uint32_t version_mask,
		const bool stale)
{
	char blockhash[68], cdfield[64], *gbt_block;
	sdata_t *sdata = client->sdata;
	ckpool_t *ckp = wb->ckp;
	double network_diff;
	json_t *val = NULL;
	uchar flip32[32];
	ts_t ts_now;
	bool ret;

	/* Submit anything over 99.9% of the diff in case of rounding errors */
	network_diff = sdata->current_workbase->network_diff * 0.999;
	if (likely(diff < network_diff))
		return;

	LOGWARNING("Possible %sblock solve diff %lf !", stale ? "stale share " : "", diff);
	/* Can't submit a block in proxy mode without the transactions */
	if (!ckp->node && wb->proxy)
		return;

	ts_realtime(&ts_now);
	sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec);

	gbt_block = process_block(wb, coinbase, cblen, data, hash, flip32, blockhash);
	/* Push the solve to any attached passthrough nodes first */
	send_node_block(ckp, sdata, client->enonce1, nonce, nonce2, ntime32, version_mask,
			wb->id, diff, client->id, coinbase, cblen, data);

	/* Build the block-event record for logging/upstream/downstream use */
	val = json_object();
	json_set_int(val, "height", wb->height);
	json_set_string(val, "blockhash", blockhash);
	json_set_string(val, "confirmed", "n");
	json_set_int64(val, "workinfoid", wb->id);
	json_set_string(val, "username", client->user_instance->username);
	json_set_string(val, "workername", client->workername);
	if (ckp->remote)
		json_set_int64(val, "clientid", client->virtualid);
	else
		json_set_int64(val, "clientid", client->id);
	json_set_string(val, "enonce1", client->enonce1);
	json_set_string(val, "nonce2", nonce2);
	json_set_string(val, "nonce", nonce);
	json_set_uint32(val, "ntime32", ntime32);
	json_set_uint32(val, "version_mask", version_mask);
	json_set_int64(val, "reward", wb->coinbasevalue);
	json_set_double(val, "diff", diff);
	json_set_string(val, "createdate", cdfield);
	json_set_string(val, "createby", "code");
	json_set_string(val, "createcode", __func__);
	json_set_string(val, "createinet", ckp->serverurl[client->server]);

	if (ckp->remote) {
		add_remote_blockdata(ckp, val, cblen, coinbase, data);
		upstream_json_msgtype(ckp, val, SM_BLOCK);
	} else {
		downstream_block(ckp, sdata, val, cblen, coinbase, data);
	}

	/* Submit block locally after sending it to remote locations avoiding
	 * the delay of local verification */
	ret = local_block_submit(ckp, gbt_block, flip32, wb->height);
	if (ret)
		block_solve(ckp, val);
	else
		block_reject(val);

	json_decref(val);
}

/* Entered with instance_lock held.
 * Returns the coinb2 binary to use for this client: the per-user one in
 * btcsolo mode when a userwb exists for this workbase, otherwise the
 * workbase's shared coinb2. *cb2len is set to its length. */
static inline uchar *__user_coinb2(const stratum_instance_t *client, const workbase_t *wb, int *cb2len)
{
	struct userwb *userwb;
	int64_t id;

	if (!client->ckp->btcsolo)
		goto out_nouserwb;

	id = wb->id;
	HASH_FIND_I64(client->user_instance->userwbs, &id, userwb);
	if (unlikely(!userwb))
		goto out_nouserwb;
	*cb2len = userwb->coinb2len;
	return userwb->coinb2bin;

out_nouserwb:
	*cb2len = wb->coinb2len;
	return wb->coinb2bin;
}

/* Needs to be entered with workbase readcount and client holding a ref count.
 *
 * Reconstructs the full coinbase and block header from the client's
 * submission, double-SHA256 hashes it, and returns the achieved share
 * difficulty, also testing for a block solve regardless of share status. */
static double submission_diff(sdata_t *sdata, const stratum_instance_t *client, const workbase_t *wb,
			      const char *nonce2, const uint32_t ntime32, uint32_t version_mask,
			      const char *nonce, uchar *hash, const bool stale)
{
	unsigned char merkle_root[32], merkle_sha[64];
	uint32_t *data32, *swap32, benonce32;
	char *coinbase, data[80];
	uchar swap[80], hash1[32];
	int cblen, i, cb2len;
	uchar *coinb2bin;
	double ret;

	/* Leave ample enough room for donation generation address (~25) + length counter + user generation
	 * wb->coinb1len + wb->enonce1constlen + wb->enonce1varlen + wb->enonce2varlen + wb->coinb2len + 25 + cb2len */

	coinbase = alloca(1024);
	/* Assemble coinbase: coinb1 | enonce1 | enonce2 | coinb2 */
	memcpy(coinbase, wb->coinb1bin, wb->coinb1len);
	cblen = wb->coinb1len;
	memcpy(coinbase + cblen, &client->enonce1bin, wb->enonce1constlen + wb->enonce1varlen);
	cblen += wb->enonce1constlen + wb->enonce1varlen;
	hex2bin(coinbase + cblen, nonce2, wb->enonce2varlen);
	cblen += wb->enonce2varlen;

	ck_rlock(&sdata->instance_lock);
	coinb2bin = __user_coinb2(client, wb, &cb2len);
	memcpy(coinbase + cblen, coinb2bin, cb2len);
	ck_runlock(&sdata->instance_lock);

	cblen += cb2len;

	/* Fold the coinbase hash up through the merkle branches */
	gen_hash((uchar *)coinbase, merkle_root, cblen);
	memcpy(merkle_sha, merkle_root, 32);
	for (i = 0; i < wb->merkles; i++) {
		memcpy(merkle_sha + 32, &wb->merklebin[i], 32);
		gen_hash(merkle_sha, merkle_root, 64);
		memcpy(merkle_sha, merkle_root, 32);
	}
	data32 = (uint32_t *)merkle_sha;
	swap32 = (uint32_t *)merkle_root;
	flip_32(swap32, data32);

	/* Copy the cached header binary and insert the merkle root */
	memcpy(data, wb->headerbin, 80);
	memcpy(data + 36, merkle_root, 32);

	/* Update nVersion when version_mask is in use */
	if (version_mask) {
		version_mask = htobe32(version_mask);
		data32 = (uint32_t *)data;
		*data32 |= version_mask;
	}

	/* Insert the nonce value into the data */
	hex2bin(&benonce32, nonce, 4);
	data32 = (uint32_t *)(data + 64 + 12);
	*data32 = benonce32;

	/* Insert the ntime value into the data */
	data32 = (uint32_t *)(data + 68);
	*data32 = htobe32(ntime32);

	/* Hash the share */
	data32 = (uint32_t *)data;
	swap32 = (uint32_t *)swap;
	flip_80(swap32, data32);
	sha256(swap, 80, hash1);
	sha256(hash1, 32, hash);

	/* Calculate the diff of the share here */
	ret = diff_from_target(hash);

	/* Test we haven't solved a block regardless of share status */
	test_blocksolve(client, wb, swap, hash, ret, coinbase, cblen, nonce2, nonce, ntime32, version_mask, stale);

	return ret;
}

/* Optimised for the common case where shares are new.
 * Returns false if the share hash was already seen (duplicate). */
static bool new_share(sdata_t *sdata, const uchar *hash, const int64_t wb_id)
{
	share_t *share = ckzalloc(sizeof(share_t)), *match = NULL;
	bool ret = true;

	memcpy(share->hash, hash, 32);
	share->workbase_id = wb_id;

	mutex_lock(&sdata->share_lock);
	sdata->shares_generated++;
	HASH_FIND(hh, sdata->shares, hash, 32, match);
	if (likely(!match))
		HASH_ADD(hh, sdata->shares, hash, 32, share);
	mutex_unlock(&sdata->share_lock);

	/* Duplicate: discard the speculative allocation */
	if (unlikely(match)) {
		dealloc(share);
		ret = false;
	}
	return ret;
}

static void update_client(const stratum_instance_t *client, const int64_t client_id);

/* Submit a share in proxy mode to the parent pool. workbase_lock is held.
 * Needs to be entered with client holding a ref count. */
static void submit_share(stratum_instance_t *client, const int64_t jobid, const char *nonce2,
			 const char *ntime, const char *nonce)
{
	ckpool_t *ckp = client->ckp;
	json_t *json_msg;
	char enonce2[32];

	/* Upstream expects the full extranonce2 = our variable enonce1 part
	 * plus the client's nonce2 */
	sprintf(enonce2, "%s%s", client->enonce1var, nonce2);
	JSON_CPACK(json_msg, "{sIsssssssIsIsi}", "jobid", jobid, "nonce2", enonce2,
		   "ntime", ntime, "nonce", nonce, "client_id", client->id,
		   "proxy", client->proxyid, "subproxy", client->subproxyid);
	generator_add_send(ckp, json_msg);
}

/* Track per-user/worker/pool best share difficulties and message the client
 * when it sets a new personal best. */
static void check_best_diff(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker,
			    const double sdiff, stratum_instance_t *client)
{
	char buf[512];
	bool best_ever = false, best_worker = false, best_user = false;

	if (sdiff > user->best_ever) {
		user->best_ever = sdiff;
		best_ever = true;
	}
	if (sdiff > worker->best_ever) {
		worker->best_ever = sdiff;
		best_ever = true;
	}
	if (sdiff > worker->best_diff) {
		worker->best_diff = sdiff;
		best_worker = true;
	}
	if (sdiff > user->best_diff) {
		user->best_diff = sdiff;
		best_user = true;
	}
	/* Check against pool's best diff unlocked first, then recheck once
	 * the mutex is locked. */
	if (best_user && sdiff > sdata->stats.best_diff) {
		/* Don't set pool best diff if it's a block since we will have
		 * reset it to zero. */
		mutex_lock(&sdata->stats_lock);
		if (unlikely(sdiff > sdata->stats.best_diff && sdiff < sdata->current_workbase->network_diff))
			sdata->stats.best_diff = sdiff;
		mutex_unlock(&sdata->stats_lock);
	}
	if (likely((!best_user && !best_worker) || !client))
		return;
	snprintf(buf, 511, "New best %sshare for %s: %lf", best_ever ? "ever " : "",
		 best_user ? "user" : "worker", sdiff);
	stratum_send_message(sdata, client, buf);
}

#define JSON_ERR(err) json_string(SHARE_ERR(err))

/* Needs to be entered with client holding a ref count.
 *
 * Validates a mining.submit request: parses and sanitises the parameters,
 * recomputes the share difficulty, classifies the share (accepted / stale /
 * duplicate / high diff / invalid), forwards it upstream where appropriate,
 * updates statistics and writes the sharelog entry. Returns the json result
 * boolean to send back to the client. */
static json_t *parse_submit(stratum_instance_t *client, json_t *json_msg,
			    const json_t *params_val, json_t **err_val)
{
	bool share = false, result = false, invalid = true, submit = false, stale = false;
	const char *workername, *job_id, *ntime, *version_mask;
	double diff = client->diff, wdiff = 0, sdiff = -1;
	char hexhash[68] = {}, sharehash[32], cdfield[64];
	user_instance_t *user = client->user_instance;
	char *fname = NULL, *s, *nonce, *nonce2;
	uint32_t ntime32, version_mask32 = 0;
	sdata_t *sdata = client->sdata;
	enum share_err err = SE_NONE;
	ckpool_t *ckp = client->ckp;
	char idstring[24] = {};
	workbase_t *wb = NULL;
	uchar hash[32];
	int nlen, len;
	time_t now_t;
	json_t *val;
	int64_t id;
	ts_t now;
	FILE *fp;

	ts_realtime(&now);
	now_t = now.tv_sec;
	sprintf(cdfield, "%lu,%lu", now.tv_sec, now.tv_nsec);

	/* Parameter validation: [workername, job_id, nonce2, ntime, nonce,
	 * (optional) version_mask] */
	if (unlikely(!json_is_array(params_val))) {
		err = SE_NOT_ARRAY;
		*err_val = JSON_ERR(err);
		goto out;
	}
	if (unlikely(json_array_size(params_val) < 5)) {
		err = SE_INVALID_SIZE;
		*err_val = JSON_ERR(err);
		goto out;
	}
	workername = json_string_value(json_array_get(params_val, 0));
	if (unlikely(!workername || !strlen(workername))) {
		err = SE_NO_USERNAME;
		*err_val = JSON_ERR(err);
		goto out;
	}
	job_id = json_string_value(json_array_get(params_val, 1));
	if (unlikely(!job_id || !strlen(job_id))) {
		err = SE_NO_JOBID;
		*err_val = JSON_ERR(err);
		goto out;
	}
	nonce2 = (char *)json_string_value(json_array_get(params_val, 2));
	if (unlikely(!nonce2 || !strlen(nonce2) || !validhex(nonce2))) {
		err = SE_NO_NONCE2;
		*err_val = JSON_ERR(err);
		goto out;
	}
	ntime = json_string_value(json_array_get(params_val, 3));
	if (unlikely(!ntime || !strlen(ntime) || !validhex(ntime))) {
		err = SE_NO_NTIME;
		*err_val = JSON_ERR(err);
		goto out;
	}
	nonce = (char *)json_string_value(json_array_get(params_val, 4));
	if (unlikely(!nonce || strlen(nonce) < 8 || !validhex(nonce))) {
		err = SE_NO_NONCE;
		*err_val = JSON_ERR(err);
		goto out;
	}

	version_mask = json_string_value(json_array_get(params_val, 5));
	if (version_mask && strlen(version_mask) && validhex(version_mask)) {
		sscanf(version_mask, "%x", &version_mask32);
		/* check version mask: non-zero bits outside the server's
		 * allowed mask mean the client changed bits which the server
		 * doesn't allow to change */
		if (version_mask32 && ((~ckp->version_mask) & version_mask32) != 0) {
			err = SE_INVALID_VERSION_MASK;
			*err_val = JSON_ERR(err);
			goto out;
		}
	}
	if (safecmp(workername, client->workername)) {
		err = SE_WORKER_MISMATCH;
		*err_val = JSON_ERR(err);
		goto out;
	}
	sscanf(job_id, "%lx", &id);
	sscanf(ntime, "%x", &ntime32);

	share = true;

	if (unlikely(!sdata->current_workbase))
		return json_boolean(false);

	wb = get_workbase(sdata, id);
	if (unlikely(!wb)) {
		/* Unknown job id: log against the current workbase instead */
		id = sdata->current_workbase->id;
		err = SE_INVALID_JOBID;
		json_set_string(json_msg, "reject-reason", SHARE_ERR(err));
		strncpy(idstring, job_id, 19);
		ASPRINTF(&fname, "%s.sharelog", sdata->current_workbase->logdir);
		goto out_nowb;
	}
	wdiff = wb->diff;
	strncpy(idstring, wb->idstring, 20);
	ASPRINTF(&fname, "%s.sharelog", wb->logdir);
	/* Fix broken clients sending too many chars. Nonce2 is part of the
	 * read only json so use a temporary variable and modify it. */
	len = wb->enonce2varlen * 2;
	nlen = strlen(nonce2);
	if (unlikely(nlen != len)) {
		if (nlen > len) {
			nonce2 = strdupa(nonce2);
			nonce2[len] = '\0';
		} else if (nlen < len) {
			/* Too short: zero-pad on the right to the expected
			 * length */
			char *tmp = nonce2;

			nonce2 = strdupa("0000000000000000");
			memcpy(nonce2, tmp, nlen);
			nonce2[len] = '\0';
		}
	}
	/* Same with nonce, but we need at least 8 chars. We checked for this
	 * earlier. */
	len = 8;
	nlen = strlen(nonce);
	if (unlikely(nlen > len)) {
		nonce = strdupa(nonce);
		nonce[len] = '\0';
	}
	if (id < sdata->blockchange_id)
		stale = true;
	sdiff = submission_diff(sdata, client, wb, nonce2, ntime32, version_mask32, nonce, hash, stale);
	if (sdiff > client->best_diff) {
		worker_instance_t *worker = client->worker_instance;

		client->best_diff = sdiff;
		LOGINFO("User %s worker %s client %s new best diff %lf", user->username,
			worker->workername, client->identity, sdiff);
		check_best_diff(sdata, user, worker, sdiff, client);
	}
	bswap_256(sharehash, hash);
	__bin2hex(hexhash, sharehash, 32);

	if (stale) {
		/* Accept shares if they're received on remote nodes before the
		 * workbase was retired. */
		if (client->latency) {
			int latency;
			tv_t now_tv;

			ts_to_tv(&now_tv, &now);
			latency = ms_tvdiff(&now_tv, &wb->retired);
			if (latency < client->latency) {
				LOGDEBUG("Accepting %dms late share from client %s",
					 latency, client->identity);
				goto no_stale;
			}
		}
		err = SE_STALE;
		json_set_string(json_msg, "reject-reason", SHARE_ERR(err));
		goto out_submit;
	}
no_stale:
	/* Ntime cannot be less, but allow forward ntime rolling up to max */
	if (ntime32 < wb->ntime32 || ntime32 > wb->ntime32 + 7000) {
		err = SE_NTIME_INVALID;
		json_set_string(json_msg, "reject-reason", SHARE_ERR(err));
		goto out_put;
	}
	invalid = false;
out_submit:
	if (sdiff >= wdiff)
		submit = true;
	if (unlikely(sdiff >= sdata->current_workbase->network_diff)) {
		/* Make sure we always submit any possible block solve */
		LOGWARNING("Submitting possible block solve share diff %lf !", sdiff);
		submit = true;
	}
out_put:
	put_workbase(sdata, wb);
out_nowb:

	/* Accept shares of the old diff until the next update */
	if (id < client->diff_change_job_id)
		diff = client->old_diff;
	if (!invalid) {
		char wdiffsuffix[16];

		suffix_string(wdiff, wdiffsuffix, 16, 0);
		if (sdiff >= diff) {
			if (new_share(sdata, hash, id)) {
				LOGINFO("Accepted client %s share diff %.1f/%.0f/%s: %s",
					client->identity, sdiff, diff, wdiffsuffix, hexhash);
				result = true;
			} else {
				err = SE_DUPE;
				json_set_string(json_msg, "reject-reason", SHARE_ERR(err));
				LOGINFO("Rejected client %s dupe diff %.1f/%.0f/%s: %s",
					client->identity, sdiff, diff, wdiffsuffix, hexhash);
				submit = false;
			}
		} else {
			err = SE_HIGH_DIFF;
			LOGINFO("Rejected client %s high diff %.1f/%.0f/%s: %s",
				client->identity, sdiff, diff, wdiffsuffix, hexhash);
			json_set_string(json_msg, "reject-reason", SHARE_ERR(err));
			submit = false;
		}
	} else
		LOGINFO("Rejected client %s invalid share %s", client->identity, SHARE_ERR(err));

	/* Submit share to upstream pool in proxy mode. We submit valid and
	 * stale shares and filter out the rest. */
	if (wb && wb->proxy && submit) {
		LOGINFO("Submitting share upstream: %s", hexhash);
		submit_share(client, id, nonce2, ntime, nonce);
	}

	add_submit(ckp, client, diff, result, submit);

	/* Now write to the pool's sharelog. */
	val = json_object();
	json_set_int(val, "workinfoid", id);
	if (ckp->remote)
		json_set_int64(val, "clientid", client->virtualid);
	else
		json_set_int64(val, "clientid", client->id);
	json_set_string(val, "enonce1", client->enonce1);
	json_set_string(val, "nonce2", nonce2);
	json_set_string(val, "nonce", nonce);
	json_set_string(val, "ntime", ntime);
	json_set_double(val, "diff", diff);
	json_set_double(val, "sdiff", sdiff);
	json_set_string(val, "hash", hexhash);
	json_set_bool(val, "result", result);
	json_object_set(val, "reject-reason", json_object_get(json_msg, "reject-reason"));
	json_object_set(val, "error", *err_val);
	json_set_int(val, "errn", err);
	json_set_string(val, "createdate", cdfield);
	json_set_string(val, "createby", "code");
	json_set_string(val, "createcode", __func__);
	json_set_string(val, "createinet", ckp->serverurl[client->server]);
	json_set_string(val, "workername", client->workername);
	json_set_string(val, "username", user->username);
	json_set_string(val, "address", client->address);
	json_set_string(val, "agent", client->useragent);

	if (ckp->logshares) {
		fp = fopen(fname, "ae");
		if (likely(fp)) {
			s = json_dumps(val, JSON_EOL);
			len = strlen(s);
			/* NOTE(review): the strlen result above is immediately
			 * overwritten by fprintf's return; only the fprintf
			 * result is checked */
			len = fprintf(fp, "%s", s);
			free(s);
			fclose(fp);
			if (unlikely(len < 0))
				LOGERR("Failed to fwrite to %s", fname);
		} else
			LOGERR("Failed to fopen %s", fname);
	}
	if (ckp->remote)
		upstream_json_msgtype(ckp, val, SM_SHARE);
	json_decref(val);
out:
	/* Escalating response to a continuous run of invalid shares: update
	 * after 60s, reconnect after 120s, disconnect after 180s */
	if (!sdata->wbincomplete && ((!result && !submit) || !share)) {
		/* Is this the first in a run of invalids? */
		if (client->first_invalid < client->last_share.tv_sec || !client->first_invalid)
			client->first_invalid = now_t;
		else if (client->first_invalid && client->first_invalid < now_t - 180 && client->reject < 3) {
			LOGNOTICE("Client %s rejecting for 180s, disconnecting", client->identity);
			if (ckp->node)
				connector_drop_client(ckp, client->id);
			else
				stratum_send_message(sdata, client, "Disconnecting for continuous invalid shares");
			client->reject = 3;
		} else if (client->first_invalid && client->first_invalid < now_t - 120 && client->reject < 2) {
			LOGNOTICE("Client %s rejecting for 120s, reconnecting", client->identity);
			stratum_send_message(sdata, client, "Reconnecting for continuous invalid shares");
			reconnect_client(sdata, client);
			client->reject = 2;
		} else if (client->first_invalid && client->first_invalid < now_t - 60 && !client->reject) {
			LOGNOTICE("Client %s rejecting for 60s, sending update", client->identity);
			update_client(client, client->id);
			client->reject = 1;
		}
	} else if (client->reject < 3) {
		client->first_invalid = 0;
		client->reject = 0;
	}

	if (!share) {
		/* Malformed submit: record a shareerr-style entry upstream */
		if (ckp->remote) {
			val = json_object();
			if (ckp->remote)
				json_set_int64(val, "clientid", client->virtualid);
			else
				json_set_int64(val, "clientid", client->id);
			if (user->secondaryuserid)
				json_set_string(val, "secondaryuserid", user->secondaryuserid);
			json_set_string(val, "enonce1", client->enonce1);
			json_set_int(val, "workinfoid", sdata->current_workbase->id);
			json_set_string(val, "workername", client->workername);
			json_set_string(val, "username", user->username);
			json_object_set(val, "error", *err_val);
			json_set_int(val, "errn", err);
			json_set_string(val, "createdate", cdfield);
			json_set_string(val, "createby", "code");
			json_set_string(val, "createcode", __func__);
			json_set_string(val, "createinet", ckp->serverurl[client->server]);
			/* NOTE(review): val is built then immediately decref'd
			 * without being sent anywhere — presumably a remnant of
			 * a removed upstream call; confirm intent */
			json_decref(val);
		}
		LOGINFO("Invalid share from client %s: %s", client->identity,
			client->workername);
	}
	free(fname);
	return json_boolean(result);
}

/* Must enter with workbase_lock held */
static json_t *__stratum_notify(const workbase_t *wb, const bool clean)
{
	json_t *val;

	JSON_CPACK(val, "{s:[ssssosssb],s:o,s:s}",
		   "params",
		   wb->idstring,
		   wb->prevhash,
		   wb->coinb1,
		   wb->coinb2,
		   json_deep_copy(wb->merkle_array),
		   wb->bbversion,
		   wb->nbit,
		   wb->ntime,
		   clean,
		   "id", json_null(),
		   "method", "mining.notify");
	return val;
}

/* Broadcast a mining.notify for workbase wb to all connected clients */
static void stratum_broadcast_update(sdata_t *sdata, const workbase_t *wb, const bool clean)
{
	json_t *json_msg;

	ck_rlock(&sdata->workbase_lock);
	json_msg = __stratum_notify(wb, clean);
	ck_runlock(&sdata->workbase_lock);

	stratum_broadcast(sdata, json_msg, SM_UPDATE);
}

/* For sending a single stratum template update */
static void stratum_send_update(sdata_t *sdata, const int64_t client_id, const bool clean)
{
	ckpool_t *ckp = sdata->ckp;
	json_t *json_msg;

	if (unlikely(!sdata->current_workbase)) {
		if (!ckp->proxy)
			LOGWARNING("No current workbase to send stratum update");
		else
			LOGDEBUG("No current workbase to send stratum update for client %"PRId64, client_id);
		return;
	}

	ck_rlock(&sdata->workbase_lock);
	json_msg = __stratum_notify(sdata->current_workbase, clean);
	ck_runlock(&sdata->workbase_lock);

	stratum_add_send(sdata, json_msg, client_id, SM_UPDATE);
}

/* Hold instance and workbase lock.
 * Like __stratum_notify but substitutes the user's solo-mode coinb2.
 * Returns NULL if no userwb exists for this workbase. */
static json_t *__user_notify(const workbase_t *wb, const user_instance_t *user, const bool clean)
{
	int64_t id = wb->id;
	struct userwb *userwb;
	json_t *val;

	HASH_FIND_I64(user->userwbs, &id, userwb);
	if (unlikely(!userwb)) {
		LOGINFO("Failed to find userwb in __user_notify!");
		return NULL;
	}

	JSON_CPACK(val, "{s:[ssssosssb],s:o,s:s}",
		   "params",
		   wb->idstring,
		   wb->prevhash,
		   wb->coinb1,
		   userwb->coinb2,
		   json_deep_copy(wb->merkle_array),
		   wb->bbversion,
		   wb->nbit,
		   wb->ntime,
		   clean,
		   "id", json_null(),
		   "method", "mining.notify");
	return val;
}

/* Sends a stratum update with a unique coinb2 for every client. Avoid
 * recursive locking: the instance lock is dropped around the per-client work
 * while the client is pinned by a ref count. */
static void stratum_broadcast_updates(sdata_t *sdata, bool clean)
{
	stratum_instance_t *client, *tmp;
	json_t *json_msg;

	ck_wlock(&sdata->instance_lock);
	HASH_ITER(hh, sdata->stratum_instances, client, tmp) {
		if (!client->user_instance)
			continue;
		/* Pin the client, then release the instance lock so we can
		 * take the workbase lock without nesting */
		__inc_instance_ref(client);
		ck_wunlock(&sdata->instance_lock);

		ck_rlock(&sdata->workbase_lock);
		json_msg = __user_notify(sdata->current_workbase, client->user_instance, clean);
		ck_runlock(&sdata->workbase_lock);

		if (likely(json_msg))
			stratum_add_send(sdata, json_msg, client->id, SM_UPDATE);

		ck_wlock(&sdata->instance_lock);
		__dec_instance_ref(client);
	}
	ck_wunlock(&sdata->instance_lock);
}

/* Queue an error response for a client, echoing its request id */
static void send_json_err(sdata_t *sdata, const int64_t client_id, json_t *id_val, const char *err_msg)
{
	json_t *val;

	/* Some clients have no id_val so pass back an empty string. */
	if (unlikely(!id_val))
		JSON_CPACK(val, "{ssss}", "id", "", "error", err_msg);
	else
		JSON_CPACK(val, "{soss}", "id", json_deep_copy(id_val), "error", err_msg);
	stratum_add_send(sdata, val, client_id, SM_ERROR);
}

/* Needs to be entered with client holding a ref count.
 * Resend the current job and difficulty to a client. In btcsolo mode the
 * template update is skipped since per-user notifies are sent elsewhere. */
static void update_client(const stratum_instance_t *client, const int64_t client_id)
{
	sdata_t *sdata = client->sdata;

	if (!client->ckp->btcsolo)
		stratum_send_update(sdata, client_id, true);
	stratum_send_diff(sdata, client);
}

/* Deep-copy a stratum request into a json_params_t for queued processing.
 * Caller owns the returned allocation. */
static json_params_t
*create_json_params(const int64_t client_id, const json_t *method, const json_t *params,
		    const json_t *id_val)
{
	json_params_t *jp = ckalloc(sizeof(json_params_t));

	jp->method = json_deep_copy(method);
	jp->params = json_deep_copy(params);
	jp->id_val = json_deep_copy(id_val);
	jp->client_id = client_id;
	return jp;
}

/* Implement support for the diff in the params as well as the originally
 * documented form of placing diff within the method. Needs to be entered with
 * client holding a ref count. */
static void suggest_diff(ckpool_t *ckp, stratum_instance_t *client, const char *method,
			 const json_t *params_val)
{
	json_t *arr_val = json_array_get(params_val, 0);
	int64_t sdiff;

	if (unlikely(!client_active(client))) {
		LOGNOTICE("Attempted to suggest diff on unauthorised client %s", client->identity);
		return;
	}
	if (arr_val && json_is_integer(arr_val))
		sdiff = json_integer_value(arr_val);
	else if (sscanf(method, "mining.suggest_difficulty(%"PRId64, &sdiff) != 1) {
		LOGINFO("Failed to parse suggest_difficulty for client %s", client->identity);
		return;
	}
	/* Clamp suggest diff to global pool mindiff */
	if (sdiff < ckp->mindiff)
		sdiff = ckp->mindiff;
	if (sdiff == client->suggest_diff)
		return;
	client->suggest_diff = sdiff;
	if (client->diff == sdiff)
		return;
	/* Stage the diff change against the next job id so old-diff shares
	 * remain acceptable until the client sees the new job */
	client->diff_change_job_id = client->sdata->workbase_id + 1;
	client->old_diff = client->diff;
	client->diff = sdiff;
	stratum_send_diff(ckp->sdata, client);
}

/* Send diff first when sending the first stratum template after subscribing */
static void init_client(const stratum_instance_t *client, const int64_t client_id)
{
	sdata_t *sdata = client->sdata;

	stratum_send_diff(sdata, client);
	/* In btcsolo mode the per-user notify is sent after authorisation
	 * instead */
	if (!client->ckp->btcsolo)
		stratum_send_update(sdata, client_id, true);
}

/* When a node first connects it has no transactions so we have to send all
 * current ones to it. */
static void send_node_all_txns(sdata_t *sdata, const stratum_instance_t *client)
{
	json_t *txn_array, *val, *txn_val;
	txntable_t *txn, *tmp;
	smsg_t *msg;

	txn_array = json_array();

	ck_rlock(&sdata->txn_lock);
	HASH_ITER(hh, sdata->txns, txn, tmp) {
		JSON_CPACK(txn_val, "{ss,ss}", "hash", txn->hash, "data", txn->data);
		json_array_append_new(txn_array, txn_val);
	}
	ck_runlock(&sdata->txn_lock);

	/* Trusted remotes use "method", mining nodes use "node.method" */
	if (client->trusted) {
		JSON_CPACK(val, "{ss,so}", "method", stratum_msgs[SM_TRANSACTIONS],
			   "transaction", txn_array);
	} else {
		JSON_CPACK(val, "{ss,so}", "node.method", stratum_msgs[SM_TRANSACTIONS],
			   "transaction", txn_array);
	}
	msg = ckzalloc(sizeof(smsg_t));
	msg->json_msg = val;
	msg->client_id = client->id;
	ckmsgq_add(sdata->ssends, msg);
	LOGNOTICE("Sending new node client %s all transactions", client->identity);
}

/* Detached thread: measure the node's round-trip latency and send it the
 * current transaction set, then drop the ref taken by add_mining_node */
static void *setup_node(void *arg)
{
	stratum_instance_t *client = (stratum_instance_t *)arg;

	pthread_detach(pthread_self());

	client->latency = round_trip(client->address) / 2;
	LOGNOTICE("Node client %s %s latency set to %dms", client->identity,
		  client->address, client->latency);
	send_node_all_txns(client->sdata, client);
	dec_instance_ref(client->sdata, client);
	return NULL;
}

/* Create a thread to asynchronously set latency to the node to not
 * block. Increment the ref count to prevent the client pointer
 * dereferencing under us, allowing the thread to decrement it again when
 * finished. */
static void add_mining_node(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client)
{
	pthread_t pth;

	ck_wlock(&sdata->instance_lock);
	client->node = true;
	DL_APPEND2(sdata->node_instances, client, node_prev, node_next);
	__inc_instance_ref(client);
	ck_wunlock(&sdata->instance_lock);

	LOGWARNING("Added client %s %s as mining node on server %d:%s", client->identity,
		   client->address, client->server, ckp->serverurl[client->server]);

	create_pthread(&pth, setup_node, client);
}

/* Register a trusted remote server instance and seed it with the full
 * current transaction set */
static void add_remote_server(sdata_t *sdata, stratum_instance_t *client)
{
	ck_wlock(&sdata->instance_lock);
	client->trusted = true;
	DL_APPEND2(sdata->remote_instances, client, remote_prev, remote_next);
	__inc_instance_ref(client);
	ck_wunlock(&sdata->instance_lock);

	send_node_all_txns(sdata, client);
	dec_instance_ref(sdata, client);
}

/* Enter with client holding ref count */
static void parse_method(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client,
			 const int64_t client_id, json_t *id_val, json_t *method_val,
			 json_t *params_val)
{
	const char *method;

	/* Random broken clients send something not an integer as the id so we
	 * copy the json item for id_val as is for the response.
By far the + * most common messages will be shares so look for those first */ + method = json_string_value(method_val); + if (likely(cmdmatch(method, "mining.submit") && client->authorised)) { + json_params_t *jp = create_json_params(client_id, method_val, params_val, id_val); + + ckmsgq_add(sdata->sshareq, jp); + return; + } + + if (cmdmatch(method, "mining.term")) { + LOGDEBUG("Mining terminate requested from %s %s", client->identity, client->address); + drop_client(ckp, sdata, client_id); + return; + } + + if (cmdmatch(method, "mining.subscribe")) { + json_t *val, *result_val; + + if (unlikely(client->subscribed)) { + LOGNOTICE("Client %s %s trying to subscribe twice", + client->identity, client->address); + return; + } + result_val = parse_subscribe(client, client_id, params_val); + /* Shouldn't happen, sanity check */ + if (unlikely(!result_val)) { + LOGWARNING("parse_subscribe returned NULL result_val"); + return; + } + val = json_object(); + json_object_set_new_nocheck(val, "result", result_val); + json_object_set_nocheck(val, "id", id_val); + json_object_set_new_nocheck(val, "error", json_null()); + stratum_add_send(sdata, val, client_id, SM_SUBSCRIBERESULT); + if (likely(client->subscribed)) + init_client(client, client_id); + return; + } + + if (unlikely(cmdmatch(method, "mining.remote"))) { + char buf[256]; + + /* Add this client as a trusted remote node in the connector and + * drop the client in the stratifier */ + if (!ckp->trusted[client->server] || ckp->proxy) { + LOGNOTICE("Dropping client %s %s trying to authorise as remote node on non trusted server %d", + client->identity, client->address, client->server); + connector_drop_client(ckp, client_id); + } else { + snprintf(buf, 255, "remote=%"PRId64, client_id); + send_proc(ckp->connector, buf); + add_remote_server(sdata, client); + } + sprintf(client->identity, "remote:%"PRId64, client_id); + return; + } + + if (unlikely(cmdmatch(method, "mining.node"))) { + char buf[256]; + + /* Add this client as 
a passthrough in the connector and + * add it to the list of mining nodes in the stratifier */ + if (!ckp->nodeserver[client->server] || ckp->proxy) { + LOGNOTICE("Dropping client %s %s trying to authorise as node on non node server %d", + client->identity, client->address, client->server); + connector_drop_client(ckp, client_id); + drop_client(ckp, sdata, client_id); + } else { + snprintf(buf, 255, "passthrough=%"PRId64, client_id); + send_proc(ckp->connector, buf); + add_mining_node(ckp, sdata, client); + sprintf(client->identity, "node:%"PRId64, client_id); + } + return; + } + + if (unlikely(cmdmatch(method, "mining.passthrough"))) { + char buf[256]; + + if (ckp->proxy || ckp->node ) { + LOGNOTICE("Dropping client %s %s trying to connect as passthrough on unsupported server %d", + client->identity, client->address, client->server); + connector_drop_client(ckp, client_id); + drop_client(ckp, sdata, client_id); + } else { + /*Flag this as a passthrough and manage its messages + * accordingly. No data from this client id should ever + * come directly back to this stratifier. */ + LOGNOTICE("Adding passthrough client %s %s", client->identity, client->address); + client->passthrough = true; + snprintf(buf, 255, "passthrough=%"PRId64, client_id); + send_proc(ckp->connector, buf); + sprintf(client->identity, "passthrough:%"PRId64, client_id); + } + return; + } + + /* We shouldn't really allow unsubscribed users to authorise first but + * some broken stratum implementations do that and we can handle it. 
*/ + if (cmdmatch(method, "mining.auth")) { + json_params_t *jp; + + if (unlikely(client->authorised)) { + LOGINFO("Client %s %s trying to authorise twice", + client->identity, client->address); + return; + } + jp = create_json_params(client_id, method_val, params_val, id_val); + ckmsgq_add(sdata->sauthq, jp); + return; + } + + if (cmdmatch(method, "mining.configure")) { + json_t *val, *result_val; + char version_str[12]; + + LOGINFO("Mining configure requested from %s %s", client->identity, + client->address); + sprintf(version_str, "%08x", ckp->version_mask); + val = json_object(); + JSON_CPACK(result_val, "{sbss}", "version-rolling", json_true(), + "version-rolling.mask", version_str); + json_object_set_new_nocheck(val, "result", result_val); + json_object_set_nocheck(val, "id", id_val); + json_object_set_new_nocheck(val, "error", json_null()); + stratum_add_send(sdata, val, client_id, SM_CONFIGURE); + return; + } + + /* We should only accept requests from subscribed and authed users here + * on */ + if (!client->subscribed) { + LOGINFO("Dropping %s from unsubscribed client %s %s", method, + client->identity, client->address); + connector_drop_client(ckp, client_id); + return; + } + + /* We should only accept authorised requests from here on */ + if (!client->authorised) { + LOGINFO("Dropping %s from unauthorised client %s %s", method, + client->identity, client->address); + return; + } + + if (cmdmatch(method, "mining.suggest")) { + suggest_diff(ckp, client, method, params_val); + return; + } + + /* Covers both get_transactions and get_txnhashes */ + if (cmdmatch(method, "mining.get")) { + json_params_t *jp = create_json_params(client_id, method_val, params_val, id_val); + + ckmsgq_add(sdata->stxnq, jp); + return; + } + + /* Unhandled message here */ + LOGINFO("Unhandled client %s %s method %s", client->identity, client->address, method); + return; +} + +static void free_smsg(smsg_t *msg) +{ + json_decref(msg->json_msg); + free(msg); +} + +/* Even though we 
check the results locally in node mode, check the upstream + * results in case of runs of invalids. */ +static void parse_share_result(ckpool_t *ckp, stratum_instance_t *client, json_t *val) +{ + time_t now_t; + ts_t now; + + if (likely(json_is_true(val))) { + client->upstream_invalid = 0; + return; + } + ts_realtime(&now); + now_t = now.tv_sec; + if (client->upstream_invalid < client->last_share.tv_sec || !client->upstream_invalid) + client->upstream_invalid = now_t; + else if (client->upstream_invalid && client->upstream_invalid < now_t - 150) { + LOGNOTICE("Client %s upstream rejects for 150s, disconnecting", client->identity); + connector_drop_client(ckp, client->id); + client->reject = 3; + } +} + +static void parse_diff(stratum_instance_t *client, json_t *val) +{ + double diff = json_number_value(json_array_get(val, 0)); + + LOGINFO("Set client %s to diff %lf", client->identity, diff); + client->diff = diff; +} + +static void parse_subscribe_result(stratum_instance_t *client, json_t *val) +{ + int len; + + strncpy(client->enonce1, json_string_value(json_array_get(val, 1)), 16); + len = strlen(client->enonce1) / 2; + hex2bin(client->enonce1bin, client->enonce1, len); + memcpy(&client->enonce1_64, client->enonce1bin, 8); + LOGINFO("Client %s got enonce1 %lx string %s", client->identity, client->enonce1_64, client->enonce1); +} + +static void parse_authorise_result(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client, + json_t *val) +{ + if (!json_is_true(val)) { + LOGNOTICE("Client %s was not authorised upstream, dropping", client->identity); + client->authorised = false; + connector_drop_client(ckp, client->id); + drop_client(ckp, sdata, client->id); + } else + LOGINFO("Client %s was authorised upstream", client->identity); +} + +static int node_msg_type(json_t *val) +{ + const char *method; + int i, ret = -1; + + if (!val) + goto out; + method = json_string_value(json_object_get(val, "node.method")); + if (method) { + for (i = 0; i < SM_NONE; i++) { + if 
(!strcmp(method, stratum_msgs[i])) { + ret = i; + break; + } + } + json_object_del(val, "node.method"); + } else + method = json_string_value(json_object_get(val, "method")); + + if (ret < 0 && method) { + if (!safecmp(method, "mining.submit")) + ret = SM_SHARE; + else if (!safecmp(method, "mining.notify")) + ret = SM_UPDATE; + else if (!safecmp(method, "mining.subscribe")) + ret = SM_SUBSCRIBE; + else if (cmdmatch(method, "mining.auth")) + ret = SM_AUTH; + else if (cmdmatch(method, "mining.get")) + ret = SM_TXNS; + else if (cmdmatch(method, "mining.suggest_difficulty")) + ret = SM_SUGGESTDIFF; + else + ret = SM_NONE; + } +out: + return ret; +} + +static user_instance_t *generate_remote_user(ckpool_t *ckp, const char *workername) +{ + char *base_username = strdupa(workername), *username; + sdata_t *sdata = ckp->sdata; + bool new_user = false; + user_instance_t *user; + int len; + + username = strsep(&base_username, "._"); + if (!username || !strlen(username)) + username = base_username; + len = strlen(username); + if (unlikely(len > 127)) + username[127] = '\0'; + + user = get_create_user(sdata, username, &new_user); + + if (!ckp->proxy && (new_user || !user->btcaddress)) { + /* Is this a btc address based username? */ + if (generator_checkaddr(ckp, username, &user->script, &user->segwit)) { + user->btcaddress = true; + user->txnlen = address_to_txn(user->txnbin, username, user->script, user->segwit); + } + } + if (new_user) { + LOGNOTICE("Added new remote user %s%s", username, user->btcaddress ? 
+ " as address based registration" : ""); + } + + return user; +} + +static void parse_remote_share(ckpool_t *ckp, sdata_t *sdata, json_t *val, const char *buf) +{ + json_t *workername_val = json_object_get(val, "workername"); + worker_instance_t *worker; + const char *workername; + double diff, sdiff = 0; + user_instance_t *user; + tv_t now_t; + + workername = json_string_value(workername_val); + if (unlikely(!workername_val || !workername)) { + LOGWARNING("Failed to get workername from remote message %s", buf); + return; + } + if (unlikely(!json_get_double(&diff, val, "diff") || diff < 0.000001)) { + LOGWARNING("Unable to parse valid diff from remote message %s", buf); + return; + } + json_get_double(&sdiff, val, "sdiff"); + user = generate_remote_user(ckp, workername); + user->authorised = true; + worker = get_worker(sdata, user, workername); + check_best_diff(sdata, user, worker, sdiff, NULL); + + mutex_lock(&sdata->uastats_lock); + sdata->stats.unaccounted_shares++; + sdata->stats.unaccounted_diff_shares += diff; + mutex_unlock(&sdata->uastats_lock); + + worker->shares += diff; + user->shares += diff; + tv_time(&now_t); + + decay_worker(worker, diff, &now_t); + copy_tv(&worker->last_share, &now_t); + worker->idle = false; + + decay_user(user, diff, &now_t); + copy_tv(&user->last_share, &now_t); + + LOGINFO("Added %.0lf remote shares to worker %s", diff, workername); +} + +static void parse_remote_shareerr(ckpool_t *ckp, json_t *val, const char *buf) +{ + const char *workername; + + workername = json_string_value(json_object_get(val, "workername")); + if (unlikely(!workername)) { + LOGWARNING("Failed to find workername in parse_remote_shareerr %s", buf); + return; + } + /* Return value ignored */ + generate_remote_user(ckp, workername); +} + +static void send_auth_response(sdata_t *sdata, const int64_t client_id, const bool ret, + json_t *id_val, json_t *err_val) +{ + json_t *json_msg = json_object(); + + json_object_set_new_nocheck(json_msg, "result", 
json_boolean(ret)); + json_object_set_new_nocheck(json_msg, "error", err_val ? err_val : json_null()); + json_object_set(json_msg, "id", id_val); + stratum_add_send(sdata, json_msg, client_id, SM_AUTHRESULT); +} + +static void send_auth_success(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client) +{ + char *buf; + + ASPRINTF(&buf, "Authorised, welcome to %s %s!", ckp->name, + client->user_instance->username); + stratum_send_message(sdata, client, buf); + free(buf); +} + +static void send_auth_failure(sdata_t *sdata, stratum_instance_t *client) +{ + stratum_send_message(sdata, client, "Failed authorisation :("); +} + +/* For finding a client by its virtualid instead of client->id. This is an + * inefficient lookup but only occurs once on parsing a remote auth from the + * upstream pool on passthrough subclients. */ +static stratum_instance_t *ref_instance_by_virtualid(sdata_t *sdata, int64_t *client_id) +{ + stratum_instance_t *client, *ret = NULL; + + ck_wlock(&sdata->instance_lock); + for (client = sdata->stratum_instances; client; client = client->hh.next) { + if (likely(client->virtualid != *client_id)) + continue; + if (likely(!client->dropped)) { + ret = client; + __inc_instance_ref(ret); + /* Replace the client_id with the correct one, allowing + * us to send the response to the correct client */ + *client_id = client->id; + } + break; + } + ck_wunlock(&sdata->instance_lock); + + return ret; +} + +void parse_upstream_auth(ckpool_t *ckp, json_t *val) +{ + json_t *id_val = NULL, *err_val = NULL; + sdata_t *sdata = ckp->sdata; + stratum_instance_t *client; + bool ret, warn = false; + int64_t client_id; + + id_val = json_object_get(val, "id"); + if (unlikely(!id_val)) + goto out; + if (unlikely(!json_get_int64(&client_id, val, "client_id"))) + goto out; + if (unlikely(!json_get_bool(&ret, val, "result"))) + goto out; + err_val = json_object_get(val, "error"); + client = ref_instance_by_id(sdata, client_id); + /* Is this client_id a virtualid from a 
passthrough subclient */ + if (!client) + client = ref_instance_by_virtualid(sdata, &client_id); + if (!client) { + LOGINFO("Failed to find client id %"PRId64" in parse_upstream_auth", + client_id); + goto out; + } + if (ret) + send_auth_success(ckp, sdata, client); + else + send_auth_failure(sdata, client); + send_auth_response(sdata, client_id, ret, id_val, err_val); + client_auth(ckp, client, client->user_instance, ret); + dec_instance_ref(sdata, client); +out: + if (unlikely(warn)) { + char *s = json_dumps(val, 0); + + LOGWARNING("Failed to get valid upstream result in parse_upstream_auth %s", s); + free(s); + } +} + +void parse_upstream_workinfo(ckpool_t *ckp, json_t *val) +{ + add_node_base(ckp, val, true, 0); +} + +#define parse_remote_workinfo(ckp, val, client_id) add_node_base(ckp, val, true, client_id) + +static void parse_remote_auth(ckpool_t *ckp, sdata_t *sdata, json_t *val, stratum_instance_t *remote, + const int64_t remote_id) +{ + json_t *params, *method, *id_val; + stratum_instance_t *client; + json_params_t *jp; + int64_t client_id; + + if (ckp->btcsolo) { + LOGWARNING("Got remote auth request in btcsolo mode, ignoring!"); + return; + } + json_get_int64(&client_id, val, "clientid"); + /* Encode remote server client_id into remote client's id */ + client_id = (remote_id << 32) | (client_id & 0xffffffffll); + id_val = json_object_get(val, "id"); + method = json_object_get(val, "method"); + params = json_object_get(val, "params"); + jp = create_json_params(client_id, method, params, id_val); + + /* This is almost certainly the first time we'll see this client_id so + * create a new stratum instance temporarily just for auth with a plan + * to drop the client id locally once we finish with it */ + ck_wlock(&sdata->instance_lock); + client = __instance_by_id(sdata, client_id); + if (likely(!client)) + client = __stratum_add_instance(ckp, client_id, remote->address, remote->server); + client->remote = true; + json_strdup(&client->useragent, val, 
"useragent"); + json_strcpy(client->enonce1, val, "enonce1"); + json_strcpy(client->address, val, "address"); + ck_wunlock(&sdata->instance_lock); + + ckmsgq_add(sdata->sauthq, jp); +} + +/* Get the remote worker count once per minute from all the remote servers */ +static void parse_remote_workers(sdata_t *sdata, const json_t *val, const char *buf) +{ + json_t *username_val = json_object_get(val, "username"); + user_instance_t *user; + const char *username; + int workers; + + username = json_string_value(username_val); + if (unlikely(!username_val || !username)) { + LOGWARNING("Failed to get username from remote message %s", buf); + return; + } + user = get_user(sdata, username); + if (unlikely(!json_get_int(&workers, val, "workers"))) { + LOGWARNING("Failed to get workers from remote message %s", buf); + return; + } + user->remote_workers += workers; + LOGDEBUG("Adding %d remote workers to user %s", workers, username); +} + +/* Attempt to submit a remote block locally by recreating it from its workinfo */ +static void parse_remote_block(ckpool_t *ckp, sdata_t *sdata, json_t *val, const char *buf, + const int64_t client_id) +{ + json_t *workername_val = json_object_get(val, "workername"), + *name_val = json_object_get(val, "name"), *res; + const char *workername, *name, *coinbasehex, *swaphex, *cnfrm; + workbase_t *wb = NULL; + double diff = 0; + int height = 0; + int64_t id = 0; + char *msg; + int cblen; + + name = json_string_value(name_val); + if (!name_val || !name) + goto out_add; + + /* If this is the confirm block message don't try to resubmit it */ + cnfrm = json_string_value(json_object_get(val, "confirmed")); + if (cnfrm && cnfrm[0] == '1') + goto out_add; + + json_get_int64(&id, val, "workinfoid"); + coinbasehex = json_string_value(json_object_get(val, "coinbasehex")); + swaphex = json_string_value(json_object_get(val, "swaphex")); + json_get_int(&cblen, val, "cblen"); + json_get_double(&diff, val, "diff"); + + if (likely(id && coinbasehex && swaphex && 
cblen)) + wb = get_remote_workbase(sdata, id, client_id); + + if (unlikely(!wb)) + LOGWARNING("Inadequate data locally to attempt submit of remote block"); + else { + uchar swap[80], hash[32], hash1[32], flip32[32]; + char *coinbase = alloca(cblen), *gbt_block; + char blockhash[68]; + + LOGWARNING("Possible remote block solve diff %lf !", diff); + hex2bin(coinbase, coinbasehex, cblen); + hex2bin(swap, swaphex, 80); + sha256(swap, 80, hash1); + sha256(hash1, 32, hash); + gbt_block = process_block(wb, coinbase, cblen, swap, hash, flip32, blockhash); + /* Note nodes use jobid of the mapped_id instead of workinfoid */ + json_set_int64(val, "jobid", wb->mapped_id); + send_nodes_block(sdata, val, client_id); + /* We rely on the remote server to give us the ID_BLOCK + * responses, so only use this response to determine if we + * should reset the best shares. */ + if (local_block_submit(ckp, gbt_block, flip32, wb->height)) { + block_share_summary(sdata); + reset_bestshares(sdata); + } + put_remote_workbase(sdata, wb); + } + + workername = json_string_value(workername_val); + if (unlikely(!workername_val || !workername)) { + LOGWARNING("Failed to get workername from remote message %s", buf); + workername = ""; + } + if (unlikely(!json_get_int(&height, val, "height"))) + LOGWARNING("Failed to get height from remote message %s", buf); + ASPRINTF(&msg, "Block %d solved by %s @ %s!", height, workername, name); + LOGWARNING("%s", msg); + stratum_broadcast_message(sdata, msg); + free(msg); +out_add: + /* Make a duplicate for use downstream */ + res = json_deep_copy(val); + remap_workinfo_id(sdata, res, client_id); + if (!ckp->remote) + downstream_json(sdata, res, client_id, SSEND_PREPEND); + + json_decref(res); +} + +void parse_upstream_block(ckpool_t *ckp, json_t *val) +{ + char *buf; + sdata_t *sdata = ckp->sdata; + + buf = json_dumps(val, 0); + parse_remote_block(ckp, sdata, val, buf, 0); + free(buf); +} + +static void send_remote_pong(sdata_t *sdata, stratum_instance_t 
*client) +{ + json_t *json_msg; + + JSON_CPACK(json_msg, "{ss}", "method", "pong"); + stratum_add_send(sdata, json_msg, client->id, SM_PONG); +} + +static void add_node_txns(ckpool_t *ckp, sdata_t *sdata, const json_t *val) +{ + json_t *txn_array, *txn_val, *data_val, *hash_val; + txntable_t *txns = NULL; + int i, arr_size; + int added = 0; + + txn_array = json_object_get(val, "transaction"); + arr_size = json_array_size(txn_array); + + for (i = 0; i < arr_size; i++) { + const char *hash, *data; + + txn_val = json_array_get(txn_array, i); + data_val = json_object_get(txn_val, "data"); + hash_val = json_object_get(txn_val, "hash"); + data = json_string_value(data_val); + hash = json_string_value(hash_val); + if (unlikely(!data || !hash)) { + LOGERR("Failed to get hash/data in add_node_txns"); + continue; + } + + if (add_txn(ckp, sdata, &txns, hash, data, false)) + added++; + } + + if (added) + update_txns(ckp, sdata, txns, false); +} + +void parse_remote_txns(ckpool_t *ckp, const json_t *val) +{ + add_node_txns(ckp, ckp->sdata, val); +} + +static json_t *get_hash_transactions(sdata_t *sdata, const json_t *hashes) +{ + json_t *txn_array = json_array(), *arr_val; + int found = 0; + size_t index; + + ck_rlock(&sdata->txn_lock); + json_array_foreach(hashes, index, arr_val) { + const char *hash = json_string_value(arr_val); + json_t *txn_val; + txntable_t *txn; + + HASH_FIND_STR(sdata->txns, hash, txn); + if (!txn) + continue; + JSON_CPACK(txn_val, "{ss,ss}", + "hash", hash, "data", txn->data); + json_array_append_new(txn_array, txn_val); + found++; + } + ck_runlock(&sdata->txn_lock); + + return txn_array; +} + +static json_t *get_reqtxns(sdata_t *sdata, const json_t *val, bool downstream) +{ + json_t *hashes = json_object_get(val, "hash"); + json_t *txns, *ret = NULL; + int requested, found; + + if (unlikely(!hashes) || !json_is_array(hashes)) + goto out; + requested = json_array_size(hashes); + if (unlikely(!requested)) + goto out; + + txns = 
get_hash_transactions(sdata, hashes); + found = json_array_size(txns); + if (found) { + JSON_CPACK(ret, "{ssso}", "method", stratum_msgs[SM_TRANSACTIONS], "transaction", txns); + LOGINFO("Sending %d found of %d requested txns %s", found, requested, + downstream ? "downstream" : "upstream"); + } else + json_decref(txns); +out: + return ret; +} + +static void parse_remote_reqtxns(sdata_t *sdata, const json_t *val, const int64_t client_id) +{ + json_t *ret = get_reqtxns(sdata, val, true); + + if (!ret) + return; + stratum_add_send(sdata, ret, client_id, SM_TRANSACTIONS); +} + +void parse_upstream_reqtxns(ckpool_t *ckp, json_t *val) +{ + json_t *ret = get_reqtxns(ckp->sdata, val, false); + char *msg; + + if (!ret) + return; + msg = json_dumps(ret, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); + json_decref(ret); + connector_upstream_msg(ckp, msg); +} + +static void parse_trusted_msg(ckpool_t *ckp, sdata_t *sdata, json_t *val, stratum_instance_t *client) +{ + json_t *method_val = json_object_get(val, "method"); + char *buf = json_dumps(val, 0); + const char *method; + + LOGDEBUG("Got remote message %s", buf); + method = json_string_value(method_val); + if (unlikely(!method_val || !method)) { + LOGWARNING("Failed to get method from remote message %s", buf); + goto out; + } + + if (likely(!safecmp(method, stratum_msgs[SM_SHARE]))) + parse_remote_share(ckp, sdata, val, buf); + else if (!safecmp(method, stratum_msgs[SM_TRANSACTIONS])) + add_node_txns(ckp, sdata, val); + else if (!safecmp(method, stratum_msgs[SM_WORKINFO])) + parse_remote_workinfo(ckp, val, client->id); + else if (!safecmp(method, stratum_msgs[SM_AUTH])) + parse_remote_auth(ckp, sdata, val, client, client->id); + else if (!safecmp(method, stratum_msgs[SM_SHAREERR])) + parse_remote_shareerr(ckp, val, buf); + else if (!safecmp(method, stratum_msgs[SM_BLOCK])) + parse_remote_block(ckp, sdata, val, buf, client->id); + else if (!safecmp(method, stratum_msgs[SM_REQTXNS])) + 
parse_remote_reqtxns(sdata, val, client->id); + else if (!safecmp(method, "workers")) + parse_remote_workers(sdata, val, buf); + else if (!safecmp(method, "ping")) + send_remote_pong(sdata, client); + else + LOGWARNING("unrecognised trusted message %s", buf); +out: + free(buf); +} + +/* Entered with client holding ref count */ +static void node_client_msg(ckpool_t *ckp, json_t *val, stratum_instance_t *client) +{ + json_t *params, *method, *res_val, *id_val, *err_val = NULL; + int msg_type = node_msg_type(val); + sdata_t *sdata = ckp->sdata; + json_params_t *jp; + char *buf = NULL; + + if (msg_type < 0) { + buf = json_dumps(val, 0); + LOGERR("Missing client %s node method from %s", client->identity, buf); + goto out; + } + LOGDEBUG("Got client %s node method %d:%s", client->identity, msg_type, stratum_msgs[msg_type]); + id_val = json_object_get(val, "id"); + method = json_object_get(val, "method"); + params = json_object_get(val, "params"); + res_val = json_object_get(val, "result"); + switch (msg_type) { + case SM_SHARE: + jp = create_json_params(client->id, method, params, id_val); + ckmsgq_add(sdata->sshareq, jp); + break; + case SM_SHARERESULT: + parse_share_result(ckp, client, res_val); + break; + case SM_DIFF: + parse_diff(client, params); + break; + case SM_SUBSCRIBE: + parse_subscribe(client, client->id, params); + break; + case SM_SUBSCRIBERESULT: + parse_subscribe_result(client, res_val); + break; + case SM_AUTH: + parse_authorise(client, params, &err_val); + break; + case SM_AUTHRESULT: + parse_authorise_result(ckp, sdata, client, res_val); + break; + case SM_NONE: + buf = json_dumps(val, 0); + LOGNOTICE("Unrecognised method from client %s :%s", + client->identity, buf); + break; + default: + break; + } +out: + free(buf); +} + +static void parse_node_msg(ckpool_t *ckp, sdata_t *sdata, json_t *val) +{ + int msg_type = node_msg_type(val); + + if (msg_type < 0) { + char *buf = json_dumps(val, 0); + + LOGERR("Missing node method from %s", buf); + free(buf); 
+ return; + } + LOGDEBUG("Got node method %d:%s", msg_type, stratum_msgs[msg_type]); + switch (msg_type) { + case SM_TRANSACTIONS: + add_node_txns(ckp, sdata, val); + break; + case SM_WORKINFO: + add_node_base(ckp, val, false, 0); + break; + case SM_BLOCK: + submit_node_block(ckp, sdata, val); + break; + default: + break; + } +} + +/* Entered with client holding ref count */ +static void parse_instance_msg(ckpool_t *ckp, sdata_t *sdata, smsg_t *msg, stratum_instance_t *client) +{ + json_t *val = msg->json_msg, *id_val, *method, *params; + int64_t client_id = msg->client_id; + int delays = 0; + + if (client->reject == 3) { + LOGINFO("Dropping client %s %s tagged for lazy invalidation", + client->identity, client->address); + connector_drop_client(ckp, client_id); + return; + } + + /* Return back the same id_val even if it's null or not existent. */ + id_val = json_object_get(val, "id"); + + method = json_object_get(val, "method"); + if (unlikely(!method)) { + json_t *res_val = json_object_get(val, "result"); + + /* Is this a spurious result or ping response? */ + if (res_val) { + const char *result = json_string_value(res_val); + + if (!safecmp(result, "pong")) + LOGDEBUG("Received pong from client %s", client->identity); + else + LOGDEBUG("Received spurious response %s from client %s", + result ? result : "", client->identity); + return; + } + send_json_err(sdata, client_id, id_val, "-3:method not found"); + return; + } + if (unlikely(!json_is_string(method))) { + send_json_err(sdata, client_id, id_val, "-1:method is not string"); + return; + } + params = json_object_get(val, "params"); + if (unlikely(!params)) { + send_json_err(sdata, client_id, id_val, "-1:params not found"); + return; + } + /* At startup we block until there's a current workbase otherwise we + * will reject miners with the initialising message. A slightly delayed + * response to subscribe is better tolerated. 
*/ + while (unlikely(!ckp->proxy && !sdata->current_workbase)) { + cksleep_ms(100); + if (!(++delays % 50)) + LOGWARNING("%d Second delay waiting for bitcoind at startup", delays / 10); + } + parse_method(ckp, sdata, client, client_id, id_val, method, params); +} + +static void srecv_process(ckpool_t *ckp, json_t *val) +{ + char address[INET6_ADDRSTRLEN], *buf = NULL; + bool noid = false, dropped = false; + sdata_t *sdata = ckp->sdata; + stratum_instance_t *client; + smsg_t *msg; + int server; + + if (unlikely(!val)) { + LOGWARNING("srecv_process received NULL val!"); + return; + } + + msg = ckzalloc(sizeof(smsg_t)); + msg->json_msg = val; + val = json_object_get(msg->json_msg, "client_id"); + if (unlikely(!val)) { + if (ckp->node) + parse_node_msg(ckp, sdata, msg->json_msg); + else { + buf = json_dumps(val, JSON_COMPACT); + LOGWARNING("Failed to extract client_id from connector json smsg %s", buf); + } + goto out; + } + + msg->client_id = json_integer_value(val); + json_object_clear(val); + + val = json_object_get(msg->json_msg, "address"); + if (unlikely(!val)) { + buf = json_dumps(val, JSON_COMPACT); + LOGWARNING("Failed to extract address from connector json smsg %s", buf); + goto out; + } + strcpy(address, json_string_value(val)); + json_object_clear(val); + + val = json_object_get(msg->json_msg, "server"); + if (unlikely(!val)) { + buf = json_dumps(val, JSON_COMPACT); + LOGWARNING("Failed to extract server from connector json smsg %s", buf); + goto out; + } + server = json_integer_value(val); + json_object_clear(val); + + /* Parse the message here */ + ck_wlock(&sdata->instance_lock); + client = __instance_by_id(sdata, msg->client_id); + /* If client_id instance doesn't exist yet, create one */ + if (unlikely(!client)) { + noid = true; + client = __stratum_add_instance(ckp, msg->client_id, address, server); + } else if (unlikely(client->dropped)) + dropped = true; + if (likely(!dropped)) + __inc_instance_ref(client); + ck_wunlock(&sdata->instance_lock); + + 
if (unlikely(dropped)) { + /* Client may be NULL here */ + LOGNOTICE("Stratifier skipped dropped instance %"PRId64" message from server %d", + msg->client_id, server); + connector_drop_client(ckp, msg->client_id); + goto out; + } + if (unlikely(noid)) + LOGINFO("Stratifier added instance %s server %d", client->identity, server); + + if (client->trusted) + parse_trusted_msg(ckp, sdata, msg->json_msg, client); + else if (ckp->node) + node_client_msg(ckp, msg->json_msg, client); + else + parse_instance_msg(ckp, sdata, msg, client); + dec_instance_ref(sdata, client); +out: + free_smsg(msg); + free(buf); +} + +void _stratifier_add_recv(ckpool_t *ckp, json_t *val, const char *file, const char *func, const int line) +{ + sdata_t *sdata; + + if (unlikely(!val)) { + LOGWARNING("_stratifier_add_recv received NULL val from %s %s:%d", file, func, line); + return; + } + sdata = ckp->sdata; + ckmsgq_add(sdata->srecvs, val); +} + +static void ssend_process(ckpool_t *ckp, smsg_t *msg) +{ + if (unlikely(!msg->json_msg)) { + LOGERR("Sent null json msg to stratum_sender"); + free(msg); + return; + } + + /* Add client_id to the json message and send it to the + * connector process to be delivered */ + json_object_set_new_nocheck(msg->json_msg, "client_id", json_integer(msg->client_id)); + connector_add_message(ckp, msg->json_msg); + /* The connector will free msg->json_msg */ + free(msg); +} + +static void discard_json_params(json_params_t *jp) +{ + json_decref(jp->method); + json_decref(jp->params); + if (jp->id_val) + json_decref(jp->id_val); + free(jp); +} + +static void steal_json_id(json_t *val, json_params_t *jp) +{ + /* Steal the id_val as is to avoid a copy */ + json_object_set_new_nocheck(val, "id", jp->id_val); + jp->id_val = NULL; +} + +static void sshare_process(ckpool_t *ckp, json_params_t *jp) +{ + json_t *result_val, *json_msg, *err_val = NULL; + stratum_instance_t *client; + sdata_t *sdata = ckp->sdata; + int64_t client_id; + + client_id = jp->client_id; + + client = 
ref_instance_by_id(sdata, client_id); + if (unlikely(!client)) { + LOGINFO("Share processor failed to find client id %"PRId64" in hashtable!", client_id); + goto out; + } + if (unlikely(!client->authorised)) { + LOGDEBUG("Client %s no longer authorised to submit shares", client->identity); + goto out_decref; + } + json_msg = json_object(); + result_val = parse_submit(client, json_msg, jp->params, &err_val); + json_object_set_new_nocheck(json_msg, "result", result_val); + json_object_set_new_nocheck(json_msg, "error", err_val ? err_val : json_null()); + steal_json_id(json_msg, jp); + stratum_add_send(sdata, json_msg, client_id, SM_SHARERESULT); +out_decref: + dec_instance_ref(sdata, client); +out: + discard_json_params(jp); +} + +/* As ref_instance_by_id but only returns clients not authorising or authorised, + * and sets the authorising flag */ +static stratum_instance_t *preauth_ref_instance_by_id(sdata_t *sdata, const int64_t id) +{ + stratum_instance_t *client; + + ck_wlock(&sdata->instance_lock); + client = __instance_by_id(sdata, id); + if (client) { + if (client->dropped || client->authorising || client->authorised) + client = NULL; + else { + __inc_instance_ref(client); + client->authorising = true; + } + } + ck_wunlock(&sdata->instance_lock); + + return client; +} + +/* Send the auth upstream in trusted remote mode, allowing the connector to + * asynchronously receive the response and return the auth response. */ +static void upstream_auth(ckpool_t *ckp, stratum_instance_t *client, json_params_t *jp) +{ + json_t *val = json_object(); + char cdfield[64]; + char *msg; + ts_t now; + + ts_realtime(&now); + sprintf(cdfield, "%lu,%lu", now.tv_sec, now.tv_nsec); + + json_set_object(val, "params", jp->params); + json_set_object(val, "id", jp->id_val); + json_set_object(val, "method", jp->method); + json_set_string(val, "method", stratum_msgs[SM_AUTH]); + + json_set_string(val, "useragent", client->useragent ? 
: ""); + json_set_string(val, "enonce1", client->enonce1 ? : ""); + json_set_string(val, "address", client->address); + json_set_int64(val, "clientid", client->virtualid); + msg = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); + json_decref(val); + connector_upstream_msg(ckp, msg); +} + +static void sauth_process(ckpool_t *ckp, json_params_t *jp) +{ + json_t *result_val, *err_val = NULL; + sdata_t *sdata = ckp->sdata; + stratum_instance_t *client; + int64_t mindiff, client_id; + bool ret; + + client_id = jp->client_id; + + client = preauth_ref_instance_by_id(sdata, client_id); + if (unlikely(!client)) { + LOGINFO("Authoriser failed to find client id %"PRId64" in hashtable!", client_id); + goto out_noclient; + } + + result_val = parse_authorise(client, jp->params, &err_val); + ret = json_is_true(result_val); + if (ret) { + /* So far okay in remote mode, remainder to be done by upstream + * pool */ + if (ckp->remote && !ckp->btcsolo) { + upstream_auth(ckp, client, jp); + goto out; + } + send_auth_success(ckp, sdata, client); + } else + send_auth_failure(sdata, client); + send_auth_response(sdata, client_id, ret, jp->id_val, err_val); + if (!ret) + goto out; + + if (client->remote) { + /* We don't need to keep a record of clients on remote trusted + * servers after auth'ing them. */ + client->dropped = true; + goto out; + } + + /* Update the client now if they have set a valid mindiff different + * from the startdiff. 
suggest_diff overrides worker mindiff */ + if (client->suggest_diff) + mindiff = client->suggest_diff; + else + mindiff = client->worker_instance->mindiff; + if (mindiff) { + mindiff = MAX(ckp->mindiff, mindiff); + if (mindiff != client->diff) { + client->diff = mindiff; + stratum_send_diff(sdata, client); + } + } + +out: + dec_instance_ref(sdata, client); +out_noclient: + discard_json_params(jp); + +} + +static int transactions_by_jobid(sdata_t *sdata, const int64_t id) +{ + workbase_t *wb; + int ret = -1; + + ck_rlock(&sdata->workbase_lock); + HASH_FIND_I64(sdata->workbases, &id, wb); + if (wb) + ret = wb->txns; + ck_runlock(&sdata->workbase_lock); + + return ret; +} + +static json_t *txnhashes_by_jobid(sdata_t *sdata, const int64_t id) +{ + json_t *ret = NULL; + workbase_t *wb; + + ck_rlock(&sdata->workbase_lock); + HASH_FIND_I64(sdata->workbases, &id, wb); + if (wb) + ret = json_string(wb->txn_hashes); + ck_runlock(&sdata->workbase_lock); + + return ret; +} + +static void send_transactions(ckpool_t *ckp, json_params_t *jp) +{ + const char *msg = json_string_value(jp->method), + *params = json_string_value(json_array_get(jp->params, 0)); + stratum_instance_t *client = NULL; + sdata_t *sdata = ckp->sdata; + json_t *val, *hashes; + int64_t job_id = 0; + time_t now_t; + + if (unlikely(!msg || !strlen(msg))) { + LOGWARNING("send_transactions received null method"); + goto out; + } + val = json_object(); + steal_json_id(val, jp); + if (cmdmatch(msg, "mining.get_transactions")) { + int txns; + + /* We don't actually send the transactions as that would use + * up huge bandwidth, so we just return the number of + * transactions :) . Support both forms of encoding the + * request in method name and as a parameter. 
*/ + if (params && strlen(params) > 0) + sscanf(params, "%"SCNx64, &job_id); + else + sscanf(msg, "mining.get_transactions(%"SCNx64, &job_id); + txns = transactions_by_jobid(sdata, job_id); + if (txns != -1) { + json_set_int(val, "result", txns); + json_object_set_new_nocheck(val, "error", json_null()); + } else + json_set_string(val, "error", "Invalid job_id"); + goto out_send; + } + if (!cmdmatch(msg, "mining.get_txnhashes")) { + LOGDEBUG("Unhandled mining get request: %s", msg); + json_set_string(val, "error", "Unhandled"); + goto out_send; + } + + client = ref_instance_by_id(sdata, jp->client_id); + if (unlikely(!client)) { + LOGINFO("send_transactions failed to find client id %"PRId64" in hashtable!", + jp->client_id); + goto out; + } + + now_t = time(NULL); + if (now_t - client->last_txns < ckp->update_interval) { + LOGNOTICE("Rate limiting get_txnhashes on client %"PRId64"!", jp->client_id); + json_set_string(val, "error", "Ratelimit"); + goto out_send; + } + client->last_txns = now_t; + if (!params || !strlen(params)) { + json_set_string(val, "error", "Invalid params"); + goto out_send; + } + sscanf(params, "%"SCNx64, &job_id); + hashes = txnhashes_by_jobid(sdata, job_id); + if (hashes) { + json_object_set_new_nocheck(val, "result", hashes); + json_object_set_new_nocheck(val, "error", json_null()); + } else + json_set_string(val, "error", "Invalid job_id"); +out_send: + stratum_add_send(sdata, val, jp->client_id, SM_TXNSRESULT); +out: + if (client) + dec_instance_ref(sdata, client); + discard_json_params(jp); +} + +static void add_log_entry(log_entry_t **entries, char **fname, char **buf) +{ + log_entry_t *entry = ckalloc(sizeof(log_entry_t)); + + entry->fname = *fname; + *fname = NULL; + entry->buf = *buf; + *buf = NULL; + DL_APPEND(*entries, entry); +} + +static void dump_log_entries(log_entry_t **entries) +{ + log_entry_t *entry, *tmpentry; + FILE *fp; + + DL_FOREACH_SAFE(*entries, entry, tmpentry) { + DL_DELETE(*entries, entry); + fp = fopen(entry->fname, "we"); + 
if (likely(fp)) { + fprintf(fp, "%s", entry->buf); + fclose(fp); + } else + LOGERR("Failed to fopen %s in dump_log_entries", entry->fname); + free(entry->fname); + free(entry->buf); + free(entry); + } +} + +static void upstream_workers(ckpool_t *ckp, user_instance_t *user) +{ + char *msg; + + ASPRINTF(&msg, "{\"method\":\"workers\",\"username\":\"%s\",\"workers\":%d}\n", + user->username, user->workers); + connector_upstream_msg(ckp, msg); +} + + +/* To iterate over all users, if user is initially NULL, this will return the first entry, + * otherwise it will return the entry after user, and NULL if there are no more entries. + * Allows us to grab and drop the lock on each iteration. */ +static user_instance_t *next_user(sdata_t *sdata, user_instance_t *user) +{ + ck_rlock(&sdata->instance_lock); + if (unlikely(!user)) + user = sdata->user_instances; + else + user = user->hh.next; + ck_runlock(&sdata->instance_lock); + + return user; +} + +/* Ditto for worker */ +static worker_instance_t *next_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) +{ + ck_rlock(&sdata->instance_lock); + if (!worker) + worker = user->worker_instances; + else + worker = worker->next; + ck_runlock(&sdata->instance_lock); + + return worker; +} + +static void *statsupdate(void *arg) +{ + ckpool_t *ckp = (ckpool_t *)arg; + sdata_t *sdata = ckp->sdata; + pool_stats_t *stats = &sdata->stats; + + pthread_detach(pthread_self()); + rename_proc("statsupdate"); + + tv_time(&stats->start_time); + cksleep_prepare_r(&stats->last_update); + sleep(1); + + while (42) { + double ghs, ghs1, ghs5, ghs15, ghs60, ghs360, ghs1440, ghs10080, + per_tdiff, percent; + char suffix1[16], suffix5[16], suffix15[16], suffix60[16], cdfield[64]; + char suffix360[16], suffix1440[16], suffix10080[16]; + int remote_users = 0, remote_workers = 0, idle_workers = 0; + log_entry_t *log_entries = NULL; + char_entry_t *char_list = NULL; + stratum_instance_t *client; + user_instance_t *user; + char *fname, *s, 
*sp; + tv_t now, diff; + ts_t ts_now; + json_t *val; + FILE *fp; + int i; + + tv_time(&now); + timersub(&now, &stats->start_time, &diff); + + ck_wlock(&sdata->instance_lock); + /* Grab the first entry */ + client = sdata->stratum_instances; + if (likely(client)) + __inc_instance_ref(client); + ck_wunlock(&sdata->instance_lock); + + while (client) { + tv_time(&now); + /* Look for clients that may have been dropped which the + * stratifier has not been informed about and ask the + * connector if they still exist */ + if (client->dropped) + connector_test_client(ckp, client->id); + else if (remote_server(client)) { + /* Do nothing to these */ + } else if (!client->authorised) { + /* Test for clients that haven't authed in over a minute + * and drop them lazily */ + if (now.tv_sec > client->start_time + 60) { + client->dropped = true; + connector_drop_client(ckp, client->id); + } + } else { + per_tdiff = tvdiff(&now, &client->last_share); + /* Decay times per connected instance */ + if (per_tdiff > 60) { + /* No shares for over a minute, decay to 0 */ + decay_client(client, 0, &now); + idle_workers++; + if (per_tdiff > 600) + client->idle = true; + /* Test idle clients are still connected */ + connector_test_client(ckp, client->id); + } + } + + ck_wlock(&sdata->instance_lock); + /* Drop the reference of the last entry we examined, + * then grab the next client. 
*/ + __dec_instance_ref(client); + client = client->hh.next; + /* Grab a reference to this client allowing us to examine + * it without holding the lock */ + if (likely(client)) + __inc_instance_ref(client); + ck_wunlock(&sdata->instance_lock); + } + + user = NULL; + + while ((user = next_user(sdata, user)) != NULL) { + worker_instance_t *worker; + json_t *user_array; + + if (!user->authorised) + continue; + + tv_time(&now); + + /* Decay times per user */ + per_tdiff = tvdiff(&now, &user->last_share); + /* Drop storage of users with no shares */ + if (!user->last_share.tv_sec) { + LOGDEBUG("Skipping inactive user %s", user->username); + continue; + } + if (per_tdiff > 60) + decay_user(user, 0, &now); + + ghs = user->dsps1440 * nonces; + suffix_string(ghs, suffix1440, 16, 0); + + ghs = user->dsps1 * nonces; + suffix_string(ghs, suffix1, 16, 0); + + ghs = user->dsps5 * nonces; + suffix_string(ghs, suffix5, 16, 0); + + ghs = user->dsps60 * nonces; + suffix_string(ghs, suffix60, 16, 0); + + ghs = user->dsps10080 * nonces; + suffix_string(ghs, suffix10080, 16, 0); + + JSON_CPACK(val, "{ss,ss,ss,ss,ss,si,si,sI,sf,sI, sI}", + "hashrate1m", suffix1, + "hashrate5m", suffix5, + "hashrate1hr", suffix60, + "hashrate1d", suffix1440, + "hashrate7d", suffix10080, + "lastshare", user->last_share.tv_sec, + "workers", user->workers + user->remote_workers, + "shares", user->shares, + "bestshare", user->best_diff, + "bestever", user->best_ever, + "authorised", user->auth_time); + + if (user->remote_workers) { + remote_workers += user->remote_workers; + /* Reset the remote_workers count once per minute */ + user->remote_workers = 0; + /* We check this unlocked but transiently + * wrong is harmless */ + if (!user->workers) + remote_users++; + } + + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT); + ASPRINTF(&sp, "User %s:%s", user->username, s); + dealloc(s); + add_msg_entry(&char_list, &sp); + + user_array = json_array(); + worker = NULL; + + /* Decay times per 
worker */ + while ((worker = next_worker(sdata, user, worker)) != NULL) { + json_t *wval; + + per_tdiff = tvdiff(&now, &worker->last_share); + if (per_tdiff > 60) { + decay_worker(worker, 0, &now); + worker->idle = true; + /* Drop storage of workers idle for 1 week */ + if (per_tdiff > 600000) { + LOGDEBUG("Skipping inactive worker %s", worker->workername); + continue; + } + } + + ghs = worker->dsps1440 * nonces; + suffix_string(ghs, suffix1440, 16, 0); + + ghs = worker->dsps1 * nonces; + suffix_string(ghs, suffix1, 16, 0); + + ghs = worker->dsps5 * nonces; + suffix_string(ghs, suffix5, 16, 0); + + ghs = worker->dsps60 * nonces; + suffix_string(ghs, suffix60, 16, 0); + + ghs = worker->dsps10080 * nonces; + suffix_string(ghs, suffix10080, 16, 0); + + LOGDEBUG("Storing worker %s", worker->workername); + + JSON_CPACK(wval, "{ss,ss,ss,ss,ss,ss,si,sI,sf,sI}", + "workername", worker->workername, + "hashrate1m", suffix1, + "hashrate5m", suffix5, + "hashrate1hr", suffix60, + "hashrate1d", suffix1440, + "hashrate7d", suffix10080, + "lastshare", worker->last_share.tv_sec, + "shares", worker->shares, + "bestshare", worker->best_diff, + "bestever", worker->best_ever); + json_array_append_new(user_array, wval); + } + + json_object_set_new_nocheck(val, "worker", user_array); + ASPRINTF(&fname, "%s/users/%s", ckp->logdir, user->username); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_EOL | + JSON_REAL_PRECISION(16) | JSON_INDENT(1)); + add_log_entry(&log_entries, &fname, &s); + json_decref(val); + if (ckp->remote) + upstream_workers(ckp, user); + } + + if (remote_workers) { + mutex_lock(&sdata->stats_lock); + stats->remote_workers = remote_workers; + stats->remote_users = remote_users; + mutex_unlock(&sdata->stats_lock); + } + + /* Dump log entries out of instance_lock */ + dump_log_entries(&log_entries); + notice_msg_entries(&char_list); + + ghs1 = stats->dsps1 * nonces; + suffix_string(ghs1, suffix1, 16, 0); + + ghs5 = stats->dsps5 * nonces; + 
suffix_string(ghs5, suffix5, 16, 0); + + ghs15 = stats->dsps15 * nonces; + suffix_string(ghs15, suffix15, 16, 0); + + ghs60 = stats->dsps60 * nonces; + suffix_string(ghs60, suffix60, 16, 0); + + ghs360 = stats->dsps360 * nonces; + suffix_string(ghs360, suffix360, 16, 0); + + ghs1440 = stats->dsps1440 * nonces; + suffix_string(ghs1440, suffix1440, 16, 0); + + ghs10080 = stats->dsps10080 * nonces; + suffix_string(ghs10080, suffix10080, 16, 0); + + ASPRINTF(&fname, "%s/pool/pool.status", ckp->logdir); + fp = fopen(fname, "we"); + if (unlikely(!fp)) + LOGERR("Failed to fopen %s", fname); + dealloc(fname); + + JSON_CPACK(val, "{si,si,si,si,si,si}", + "runtime", diff.tv_sec, + "lastupdate", now.tv_sec, + "Users", stats->users + stats->remote_users, + "Workers", stats->workers + stats->remote_workers, + "Idle", idle_workers, + "Disconnected", stats->disconnected); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + LOGNOTICE("Pool:%s", s); + fprintf(fp, "%s\n", s); + dealloc(s); + + JSON_CPACK(val, "{ss,ss,ss,ss,ss,ss,ss}", + "hashrate1m", suffix1, + "hashrate5m", suffix5, + "hashrate15m", suffix15, + "hashrate1hr", suffix60, + "hashrate6hr", suffix360, + "hashrate1d", suffix1440, + "hashrate7d", suffix10080); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + LOGNOTICE("Pool:%s", s); + fprintf(fp, "%s\n", s); + dealloc(s); + + /* Round to 4 significant digits */ + percent = round(stats->accounted_diff_shares * 10000 / stats->network_diff) / 100; + JSON_CPACK(val, "{sf,sI,sI,sI,sf,sf,sf,sf}", + "diff", percent, + "accepted", stats->accounted_diff_shares, + "rejected", stats->accounted_rejects, + "bestshare", stats->best_diff, + "SPS1m", stats->sps1, + "SPS5m", stats->sps5, + "SPS15m", stats->sps15, + "SPS1h", stats->sps60); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_REAL_PRECISION(3)); + json_decref(val); + LOGNOTICE("Pool:%s", s); + fprintf(fp, "%s\n", s); + dealloc(s); + fclose(fp); + + if 
(ckp->proxy && sdata->proxy) { + proxy_t *proxy, *proxytmp, *subproxy, *subtmp; + + mutex_lock(&sdata->proxy_lock); + JSON_CPACK(val, "{sI,si,si}", + "current", sdata->proxy->id, + "active", HASH_COUNT(sdata->proxies), + "total", sdata->proxy_count); + mutex_unlock(&sdata->proxy_lock); + + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + LOGNOTICE("Proxy:%s", s); + dealloc(s); + + mutex_lock(&sdata->proxy_lock); + HASH_ITER(hh, sdata->proxies, proxy, proxytmp) { + JSON_CPACK(val, "{sI,si,sI,sb}", + "id", proxy->id, + "subproxies", proxy->subproxy_count, + "clients", proxy->combined_clients, + "alive", !proxy->dead); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + ASPRINTF(&sp, "Proxies:%s", s); + dealloc(s); + add_msg_entry(&char_list, &sp); + HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { + JSON_CPACK(val, "{sI,si,si,sI,sI,sf,sb}", + "id", subproxy->id, + "subid", subproxy->subid, + "nonce2len", subproxy->nonce2len, + "clients", subproxy->bound_clients, + "maxclients", subproxy->max_clients, + "diff", subproxy->diff, + "alive", !subproxy->dead); + s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); + json_decref(val); + ASPRINTF(&sp, "Subproxies:%s", s); + dealloc(s); + add_msg_entry(&char_list, &sp); + } + } + mutex_unlock(&sdata->proxy_lock); + info_msg_entries(&char_list); + } + + ts_realtime(&ts_now); + sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); + JSON_CPACK(val, "{ss,si,si,si,sf,sf,sf,sf,ss,ss,ss,ss}", + "poolinstance", ckp->name, + "elapsed", diff.tv_sec, + "users", stats->users + stats->remote_users, + "workers", stats->workers + stats->remote_workers, + "hashrate", ghs1, + "hashrate5m", ghs5, + "hashrate1hr", ghs60, + "hashrate24hr", ghs1440, + "createdate", cdfield, + "createby", "code", + "createcode", __func__, + "createinet", ckp->serverurl[0]); + json_decref(val); + + /* Update stats 32 times per minute to divide up userstats, + * displaying status every minute. 
*/ + for (i = 0; i < 32; i++) { + int64_t unaccounted_shares, + unaccounted_diff_shares, + unaccounted_rejects; + + ts_to_tv(&diff, &stats->last_update); + cksleep_ms_r(&stats->last_update, 1875); + cksleep_prepare_r(&stats->last_update); + ts_to_tv(&now, &stats->last_update); + /* Calculate how long it's really been for accurate + * stats update */ + per_tdiff = tvdiff(&now, &diff); + + mutex_lock(&sdata->uastats_lock); + unaccounted_shares = stats->unaccounted_shares; + unaccounted_diff_shares = stats->unaccounted_diff_shares; + unaccounted_rejects = stats->unaccounted_rejects; + stats->unaccounted_shares = + stats->unaccounted_diff_shares = + stats->unaccounted_rejects = 0; + mutex_unlock(&sdata->uastats_lock); + + mutex_lock(&sdata->stats_lock); + stats->accounted_shares += unaccounted_shares; + stats->accounted_diff_shares += unaccounted_diff_shares; + stats->accounted_rejects += unaccounted_rejects; + + decay_time(&stats->sps1, unaccounted_shares, per_tdiff, MIN1); + decay_time(&stats->sps5, unaccounted_shares, per_tdiff, MIN5); + decay_time(&stats->sps15, unaccounted_shares, per_tdiff, MIN15); + decay_time(&stats->sps60, unaccounted_shares, per_tdiff, HOUR); + + decay_time(&stats->dsps1, unaccounted_diff_shares, per_tdiff, MIN1); + decay_time(&stats->dsps5, unaccounted_diff_shares, per_tdiff, MIN5); + decay_time(&stats->dsps15, unaccounted_diff_shares, per_tdiff, MIN15); + decay_time(&stats->dsps60, unaccounted_diff_shares, per_tdiff, HOUR); + decay_time(&stats->dsps360, unaccounted_diff_shares, per_tdiff, HOUR6); + decay_time(&stats->dsps1440, unaccounted_diff_shares, per_tdiff, DAY); + decay_time(&stats->dsps10080, unaccounted_diff_shares, per_tdiff, WEEK); + mutex_unlock(&sdata->stats_lock); + } + + /* Reset remote workers every minute since we measure it once + * every minute only. 
*/ + mutex_lock(&sdata->stats_lock); + stats->remote_workers = stats->remote_users = 0; + mutex_unlock(&sdata->stats_lock); + } + + return NULL; +} + +static void read_poolstats(ckpool_t *ckp, int *tvsec_diff) +{ + char *s = alloca(4096), *pstats, *dsps, *sps; + sdata_t *sdata = ckp->sdata; + pool_stats_t *stats = &sdata->stats; + tv_t now, last; + json_t *val; + FILE *fp; + int ret; + + snprintf(s, 4095, "%s/pool/pool.status", ckp->logdir); + fp = fopen(s, "re"); + if (!fp) { + LOGINFO("Pool does not have a logfile to read"); + return; + } + memset(s, 0, 4096); + ret = fread(s, 1, 4095, fp); + fclose(fp); + if (ret < 1 || !strlen(s)) { + LOGDEBUG("No string to read in pool logfile"); + return; + } + /* Strip out end of line terminators */ + pstats = strsep(&s, "\n"); + dsps = strsep(&s, "\n"); + sps = strsep(&s, "\n"); + if (!s) { + LOGINFO("Failed to find EOL in pool logfile"); + return; + } + val = json_loads(pstats, 0, NULL); + if (!val) { + LOGINFO("Failed to json decode pstats line from pool logfile: %s", pstats); + return; + } + tv_time(&now); + last.tv_sec = 0; + json_get_int64(&last.tv_sec, val, "lastupdate"); + json_decref(val); + LOGINFO("Successfully read pool pstats: %s", pstats); + + val = json_loads(dsps, 0, NULL); + if (!val) { + LOGINFO("Failed to json decode dsps line from pool logfile: %s", dsps); + return; + } + stats->dsps1 = dsps_from_key(val, "hashrate1m"); + stats->dsps5 = dsps_from_key(val, "hashrate5m"); + stats->dsps15 = dsps_from_key(val, "hashrate15m"); + stats->dsps60 = dsps_from_key(val, "hashrate1hr"); + stats->dsps360 = dsps_from_key(val, "hashrate6hr"); + stats->dsps1440 = dsps_from_key(val, "hashrate1d"); + stats->dsps10080 = dsps_from_key(val, "hashrate7d"); + json_decref(val); + LOGINFO("Successfully read pool dsps: %s", dsps); + + val = json_loads(sps, 0, NULL); + if (!val) { + LOGINFO("Failed to json decode sps line from pool logfile: %s", sps); + return; + } + json_get_double(&stats->sps1, val, "SPS1m"); + 
json_get_double(&stats->sps5, val, "SPS5m"); + json_get_double(&stats->sps15, val, "SPS15m"); + json_get_double(&stats->sps60, val, "SPS1h"); + json_get_int64(&stats->accounted_diff_shares, val, "accepted"); + json_get_int64(&stats->accounted_rejects, val, "rejected"); + json_get_double(&stats->best_diff, val, "bestshare"); + json_decref(val); + + LOGINFO("Successfully read pool sps: %s", sps); + if (last.tv_sec) + *tvsec_diff = now.tv_sec - last.tv_sec - 60; + if (*tvsec_diff > 60) { + LOGNOTICE("Old pool stats indicate pool down for %d seconds, decaying stats", + *tvsec_diff); + decay_time(&stats->sps1, 0, *tvsec_diff, MIN1); + decay_time(&stats->sps5, 0, *tvsec_diff, MIN5); + decay_time(&stats->sps15, 0, *tvsec_diff, MIN15); + decay_time(&stats->sps60, 0, *tvsec_diff, HOUR); + + decay_time(&stats->dsps1, 0, *tvsec_diff, MIN1); + decay_time(&stats->dsps5, 0, *tvsec_diff, MIN5); + decay_time(&stats->dsps15, 0, *tvsec_diff, MIN15); + decay_time(&stats->dsps60, 0, *tvsec_diff, HOUR); + decay_time(&stats->dsps360, 0, *tvsec_diff, HOUR6); + decay_time(&stats->dsps1440, 0, *tvsec_diff, DAY); + decay_time(&stats->dsps10080, 0, *tvsec_diff, WEEK); + } +} + +static char *status_chars = "|/-\\"; + +void *throbber(void *arg) +{ + ckpool_t *ckp = arg; + sdata_t *sdata = ckp->sdata; + int counter = 0; + + rename_proc("throbber"); + + while (42) { + double sdiff; + pool_stats_t *stats; + char stamp[128], hashrate[16], ch; + + sleep(1); + if (ckp->quiet) + continue; + sdiff = sdata->stats.accounted_diff_shares; + stats = &sdata->stats; + suffix_string(stats->dsps1 * nonces, hashrate, 16, 3); + ch = status_chars[(counter++) & 0x3]; + get_timestamp(stamp); + if (likely(sdata->current_workbase)) { + double bdiff = sdiff / sdata->current_workbase->network_diff * 100; + + fprintf(stdout, "\33[2K\r%s %c %sH/s %.1f SPS %d users %d workers %.0f shares %.1f%% diff", + stamp, ch, hashrate, stats->sps1, stats->users + stats->remote_users, + stats->workers + stats->remote_workers, sdiff, 
bdiff); + } else { + fprintf(stdout, "\33[2K\r%s %c %sH/s %.1f SPS %d users %d workers %.0f shares", + stamp, ch, hashrate, stats->sps1, stats->users + stats->remote_users, + stats->workers + stats->remote_workers, sdiff); + } + fflush(stdout); + } + + return NULL; +} + +static void *zmqnotify(void *arg) +{ +#ifdef HAVE_ZMQ_H + ckpool_t *ckp = arg; + sdata_t *sdata = ckp->sdata; + void *context, *notify; + int rc; + + rename_proc("zmqnotify"); + + context = zmq_ctx_new(); + notify = zmq_socket(context, ZMQ_SUB); + if (!notify) + quit(1, "zmq_socket failed with errno %d", errno); + rc = zmq_setsockopt(notify, ZMQ_SUBSCRIBE, "hashblock", 0); + if (rc < 0) + quit(1, "zmq_setsockopt failed with errno %d", errno); + rc = zmq_connect(notify, ckp->zmqblock); + if (rc < 0) + quit(1, "zmq_connect failed with errno %d", errno); + LOGNOTICE("ZMQ connected to %s", ckp->zmqblock); + + while (42) { + zmq_msg_t message; + + do { + char hexhash[68] = {}; + int size; + + zmq_msg_init(&message); + rc = zmq_msg_recv(&message, notify, 0); + if (unlikely(rc < 0)) { + LOGWARNING("zmq_msg_recv failed with error %d", errno); + sleep(5); + zmq_msg_close(&message); + continue; + } + + size = zmq_msg_size(&message); + switch (size) { + case 9: + LOGDEBUG("ZMQ hashblock message"); + break; + case 4: + LOGDEBUG("ZMQ sequence number"); + break; + case 32: + update_base(sdata, GEN_PRIORITY); + __bin2hex(hexhash, zmq_msg_data(&message), 32); + LOGNOTICE("ZMQ block hash %s", hexhash); + break; + default: + LOGWARNING("ZMQ message size error, size = %d!", size); + break; + } + zmq_msg_close(&message); + } while (zmq_msg_more(&message)); + + LOGDEBUG("ZMQ message complete"); + } + + zmq_close(notify); + zmq_ctx_destroy (context); +#endif + pthread_detach(pthread_self()); + + return NULL; +} + +void *stratifier(void *arg) +{ + pthread_t pth_blockupdate, pth_statsupdate, pth_throbber, pth_zmqnotify; + proc_instance_t *pi = (proc_instance_t *)arg; + int threads, tvsec_diff = 0; + ckpool_t *ckp = 
pi->ckp; + int64_t randomiser; + sdata_t *sdata; + + rename_proc(pi->processname); + LOGWARNING("%s stratifier starting", ckp->name); + sdata = ckzalloc(sizeof(sdata_t)); + ckp->sdata = sdata; + sdata->ckp = ckp; + sdata->verbose = true; + + /* Wait for the generator to have something for us */ + while (!ckp->proxy && !ckp->generator_ready) + cksleep_ms(10); + while (ckp->remote && !ckp->connector_ready) + cksleep_ms(10); + + if (!ckp->proxy) { + if (!generator_checkaddr(ckp, ckp->btcaddress, &ckp->script, &ckp->segwit)) { + LOGEMERG("Fatal: btcaddress invalid according to bitcoind"); + goto out; + } + + /* Store this for use elsewhere */ + hex2bin(scriptsig_header_bin, scriptsig_header, 41); + sdata->txnlen = address_to_txn(sdata->txnbin, ckp->btcaddress, ckp->script, ckp->segwit); + + /* Find a valid donation address if possible */ + if (generator_checkaddr(ckp, ckp->donaddress, &ckp->donscript, &ckp->donsegwit)) { + ckp->donvalid = true; + sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); + LOGNOTICE("BTC donation address valid %s", ckp->donaddress); + } else if (generator_checkaddr(ckp, ckp->tndonaddress, &ckp->donscript, &ckp->donsegwit)) { + ckp->donaddress = ckp->tndonaddress; + ckp->donvalid = true; + sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); + LOGNOTICE("BTC testnet donation address valid %s", ckp->donaddress); + } else if (generator_checkaddr(ckp, ckp->rtdonaddress, &ckp->donscript, &ckp->donsegwit)) { + ckp->donaddress = ckp->rtdonaddress; + ckp->donvalid = true; + sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); + LOGNOTICE("BTC regtest donation address valid %s", ckp->donaddress); + } else + LOGNOTICE("No valid donation address found"); + } + + randomiser = time(NULL); + sdata->enonce1_64 = htole64(randomiser); + sdata->session_id = randomiser; + /* Set the initial id to time as high bits 
so as to not send the same + * id on restarts */ + randomiser <<= 32; + if (!ckp->proxy) + sdata->blockchange_id = sdata->workbase_id = randomiser; + + cklock_init(&sdata->instance_lock); + cksem_init(&sdata->update_sem); + cksem_post(&sdata->update_sem); + + /* Create half as many share processing and receiving threads as there + * are CPUs */ + threads = sysconf(_SC_NPROCESSORS_ONLN) / 2 ? : 1; + sdata->updateq = create_ckmsgq(ckp, "updater", &block_update); + sdata->sshareq = create_ckmsgqs(ckp, "sprocessor", &sshare_process, threads); + sdata->ssends = create_ckmsgqs(ckp, "ssender", &ssend_process, threads); + sdata->sauthq = create_ckmsgq(ckp, "authoriser", &sauth_process); + sdata->stxnq = create_ckmsgq(ckp, "stxnq", &send_transactions); + sdata->srecvs = create_ckmsgqs(ckp, "sreceiver", &srecv_process, threads); + create_pthread(&pth_throbber, throbber, ckp); + read_poolstats(ckp, &tvsec_diff); + read_userstats(ckp, sdata, tvsec_diff); + + /* Set diff impossibly large until we know the network diff */ + sdata->stats.network_diff = ~0ULL; + + cklock_init(&sdata->txn_lock); + cklock_init(&sdata->workbase_lock); + if (!ckp->proxy) + create_pthread(&pth_blockupdate, blockupdate, ckp); + else { + mutex_init(&sdata->proxy_lock); + } + + mutex_init(&sdata->stats_lock); + mutex_init(&sdata->uastats_lock); + if (!ckp->passthrough || ckp->node) + create_pthread(&pth_statsupdate, statsupdate, ckp); + + mutex_init(&sdata->share_lock); + if (!ckp->proxy) + create_pthread(&pth_zmqnotify, zmqnotify, ckp); + + ckp->stratifier_ready = true; + LOGWARNING("%s stratifier ready", ckp->name); + + stratum_loop(ckp, pi); +out: + /* We should never get here unless there's a fatal error */ + LOGEMERG("Stratifier failure, shutting down"); + exit(1); + return NULL; +} diff --git a/solo-ckpool-source/src/stratifier.h b/solo-ckpool-source/src/stratifier.h new file mode 100644 index 0000000..6a20d4f --- /dev/null +++ b/solo-ckpool-source/src/stratifier.h @@ -0,0 +1,102 @@ +/* + * 
Copyright 2014-2017,2023 Con Kolivas + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 3 of the License, or (at your option) + * any later version. See COPYING for more details. + */ + +#ifndef STRATIFIER_H +#define STRATIFIER_H + +/* Generic structure for both workbase in stratifier and gbtbase in generator */ +struct genwork { + /* Hash table data */ + UT_hash_handle hh; + + /* The next two fields need to be consecutive as both of them are + * used as the key for their hashtable entry in remote_workbases */ + int64_t id; + /* The client id this workinfo came from if remote */ + int64_t client_id; + + char idstring[20]; + + /* How many readers we currently have of this workbase, set + * under write workbase_lock */ + int readcount; + + /* The id a remote workinfo is mapped to locally */ + int64_t mapped_id; + + ts_t gentime; + tv_t retired; + + /* GBT/shared variables */ + char target[68]; + double diff; + double network_diff; + uint32_t version; + uint32_t curtime; + char prevhash[68]; + char ntime[12]; + uint32_t ntime32; + char bbversion[12]; + char nbit[12]; + uint64_t coinbasevalue; + int height; + char *flags; + int txns; + char *txn_data; + char *txn_hashes; + char witnessdata[80]; //null-terminated ascii + bool insert_witness; + int merkles; + char merklehash[16][68]; + char merklebin[16][32]; + json_t *merkle_array; + + /* Template variables, lengths are binary lengths! 
*/ + char *coinb1; // coinbase1 + uchar *coinb1bin; + int coinb1len; // length of above + + char enonce1const[32]; // extranonce1 section that is constant + uchar enonce1constbin[16]; + int enonce1constlen; // length of above - usually zero unless proxying + int enonce1varlen; // length of unique extranonce1 string for each worker - usually 8 + + int enonce2varlen; // length of space left for extranonce2 - usually 8 unless proxying + + char *coinb2; // coinbase2 + uchar *coinb2bin; + int coinb2len; // length of above + char *coinb3bin; // coinbase3 for variable coinb2len + int coinb3len; // length of above + + /* Cached header binary */ + char headerbin[112]; + + char *logdir; + + ckpool_t *ckp; + bool proxy; /* This workbase is proxied work */ + + bool incomplete; /* This is a remote workinfo without all the txn data */ + + json_t *json; /* getblocktemplate json */ +}; + +void parse_remote_txns(ckpool_t *ckp, const json_t *val); +#define parse_upstream_txns(ckp, val) parse_remote_txns(ckp, val) +void parse_upstream_auth(ckpool_t *ckp, json_t *val); +void parse_upstream_workinfo(ckpool_t *ckp, json_t *val); +void parse_upstream_block(ckpool_t *ckp, json_t *val); +void parse_upstream_reqtxns(ckpool_t *ckp, json_t *val); +char *stratifier_stats(ckpool_t *ckp, void *data); +void _stratifier_add_recv(ckpool_t *ckp, json_t *val, const char *file, const char *func, const int line); +#define stratifier_add_recv(ckp, val) _stratifier_add_recv(ckp, val, __FILE__, __func__, __LINE__) +void *stratifier(void *arg); + +#endif /* STRATIFIER_H */ diff --git a/solo-ckpool-source/src/uthash.h b/solo-ckpool-source/src/uthash.h new file mode 100644 index 0000000..44de601 --- /dev/null +++ b/solo-ckpool-source/src/uthash.h @@ -0,0 +1,1144 @@ +/* +Copyright (c) 2003-2022, Troy D. Hanson https://troydhanson.github.io/uthash/ +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTHASH_H +#define UTHASH_H + +#define UTHASH_VERSION 2.3.0 + +#include <string.h> /* memcmp, memset, strlen */ +#include <stddef.h> /* ptrdiff_t */ +#include <stdlib.h> /* exit */ + +#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT +/* This codepath is provided for backward compatibility, but I plan to remove it. */ +#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead" +typedef unsigned int uint32_t; +typedef unsigned char uint8_t; +#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT +#else +#include <stdint.h> /* uint8_t, uint32_t */ +#endif + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ source) this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. 
*/ +#if !defined(DECLTYPE) && !defined(NO_DECLTYPE) +#if defined(_MSC_VER) /* MS compiler */ +#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ +#define DECLTYPE(x) (decltype(x)) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#endif +#elif defined(__MCST__) /* Elbrus C Compiler */ +#define DECLTYPE(x) (__typeof(x)) +#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) +#define NO_DECLTYPE +#else /* GNU, Sun and other compilers */ +#define DECLTYPE(x) (__typeof(x)) +#endif +#endif + +#ifdef NO_DECLTYPE +#define DECLTYPE(x) +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + char **_da_dst = (char**)(&(dst)); \ + *_da_dst = (char*)(src); \ +} while (0) +#else +#define DECLTYPE_ASSIGN(dst,src) \ +do { \ + (dst) = DECLTYPE(dst)(src); \ +} while (0) +#endif + +#ifndef uthash_malloc +#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ +#endif +#ifndef uthash_free +#define uthash_free(ptr,sz) free(ptr) /* free fcn */ +#endif +#ifndef uthash_bzero +#define uthash_bzero(a,n) memset(a,'\0',n) +#endif +#ifndef uthash_strlen +#define uthash_strlen(s) strlen(s) +#endif + +#ifndef HASH_FUNCTION +#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv) +#endif + +#ifndef HASH_KEYCMP +#define HASH_KEYCMP(a,b,n) memcmp(a,b,n) +#endif + +#ifndef uthash_noexpand_fyi +#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ +#endif +#ifndef uthash_expand_fyi +#define uthash_expand_fyi(tbl) /* can be defined to log expands */ +#endif + +#ifndef HASH_NONFATAL_OOM +#define HASH_NONFATAL_OOM 0 +#endif + +#if HASH_NONFATAL_OOM +/* malloc failures can be recovered from */ + +#ifndef uthash_nonfatal_oom +#define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */ +#endif + +#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0) +#define IF_HASH_NONFATAL_OOM(x) x + +#else +/* malloc failures result in lost memory, hash tables are unusable */ + 
+#ifndef uthash_fatal +#define uthash_fatal(msg) exit(-1) /* fatal OOM error */ +#endif + +#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory") +#define IF_HASH_NONFATAL_OOM(x) + +#endif + +/* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ +#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ +#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ + +/* calculate the element whose hash handle address is hhp */ +#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) +/* calculate the hash handle from element address elp */ +#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho))) + +#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \ +do { \ + struct UT_hash_handle *_hd_hh_item = (itemptrhh); \ + unsigned _hd_bkt; \ + HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ + (head)->hh.tbl->buckets[_hd_bkt].count++; \ + _hd_hh_item->hh_next = NULL; \ + _hd_hh_item->hh_prev = NULL; \ +} while (0) + +#define HASH_VALUE(keyptr,keylen,hashv) \ +do { \ + HASH_FUNCTION(keyptr, keylen, hashv); \ +} while (0) + +#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ +do { \ + (out) = NULL; \ + if (head) { \ + unsigned _hf_bkt; \ + HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \ + if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \ + HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ + } \ + } \ +} while (0) + +#define HASH_FIND(hh,head,keyptr,keylen,out) \ +do { \ + (out) = NULL; \ + if (head) { \ + unsigned _hf_hashv; \ + HASH_VALUE(keyptr, keylen, _hf_hashv); \ + HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ + } \ +} while (0) + +#ifdef HASH_BLOOM +#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) +#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 
1UL : 0UL) +#define HASH_BLOOM_MAKE(tbl,oomed) \ +do { \ + (tbl)->bloom_nbits = HASH_BLOOM; \ + (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ + if (!(tbl)->bloom_bv) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ + (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ + } \ +} while (0) + +#define HASH_BLOOM_FREE(tbl) \ +do { \ + uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ +} while (0) + +#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) +#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) + +#define HASH_BLOOM_ADD(tbl,hashv) \ + HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) + +#define HASH_BLOOM_TEST(tbl,hashv) \ + HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) + +#else +#define HASH_BLOOM_MAKE(tbl,oomed) +#define HASH_BLOOM_FREE(tbl) +#define HASH_BLOOM_ADD(tbl,hashv) +#define HASH_BLOOM_TEST(tbl,hashv) (1) +#define HASH_BLOOM_BYTELEN 0U +#endif + +#define HASH_MAKE_TABLE(hh,head,oomed) \ +do { \ + (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \ + if (!(head)->hh.tbl) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \ + (head)->hh.tbl->tail = &((head)->hh); \ + (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ + (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ + (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ + (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ + HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ + (head)->hh.tbl->signature = HASH_SIGNATURE; \ + if (!(head)->hh.tbl->buckets) { \ + HASH_RECORD_OOM(oomed); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + } else { \ + uthash_bzero((head)->hh.tbl->buckets, \ + HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ + HASH_BLOOM_MAKE((head)->hh.tbl, 
oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (oomed) { \ + uthash_free((head)->hh.tbl->buckets, \ + HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + } \ + ) \ + } \ + } \ +} while (0) + +#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ +do { \ + (replaced) = NULL; \ + HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ + if (replaced) { \ + HASH_DELETE(hh, head, replaced); \ + } \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \ +} while (0) + +#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ +do { \ + (replaced) = NULL; \ + HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ + if (replaced) { \ + HASH_DELETE(hh, head, replaced); \ + } \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ +} while (0) + +#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ +do { \ + unsigned _hr_hashv; \ + HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ + HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ +} while (0) + +#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ +do { \ + unsigned _hr_hashv; \ + HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ + HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ +} while (0) + +#define HASH_APPEND_LIST(hh, head, add) \ +do { \ + (add)->hh.next = NULL; \ + (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ + (head)->hh.tbl->tail->next = (add); \ + (head)->hh.tbl->tail = &((add)->hh); \ +} while (0) + +#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ +do { \ + do { \ + if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \ + break; \ + } \ + } while ((_hs_iter = 
HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ +} while (0) + +#ifdef NO_DECLTYPE +#undef HASH_AKBI_INNER_LOOP +#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ +do { \ + char *_hs_saved_head = (char*)(head); \ + do { \ + DECLTYPE_ASSIGN(head, _hs_iter); \ + if (cmpfcn(head, add) > 0) { \ + DECLTYPE_ASSIGN(head, _hs_saved_head); \ + break; \ + } \ + DECLTYPE_ASSIGN(head, _hs_saved_head); \ + } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ +} while (0) +#endif + +#if HASH_NONFATAL_OOM + +#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ +do { \ + if (!(oomed)) { \ + unsigned _ha_bkt; \ + (head)->hh.tbl->num_items++; \ + HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ + if (oomed) { \ + HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \ + HASH_DELETE_HH(hh, head, &(add)->hh); \ + (add)->hh.tbl = NULL; \ + uthash_nonfatal_oom(add); \ + } else { \ + HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ + HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ + } \ + } else { \ + (add)->hh.tbl = NULL; \ + uthash_nonfatal_oom(add); \ + } \ +} while (0) + +#else + +#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ +do { \ + unsigned _ha_bkt; \ + (head)->hh.tbl->num_items++; \ + HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ + HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ + HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ + HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ +} while (0) + +#endif + + +#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ +do { \ + IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ + (add)->hh.hashv = (hashval); \ + (add)->hh.key = (char*) (keyptr); \ + (add)->hh.keylen = (unsigned) (keylen_in); \ + if (!(head)) { \ + (add)->hh.next = NULL; \ + (add)->hh.prev = NULL; \ + HASH_MAKE_TABLE(hh, add, _ha_oomed); \ + IF_HASH_NONFATAL_OOM( if 
(!_ha_oomed) { ) \ + (head) = (add); \ + IF_HASH_NONFATAL_OOM( } ) \ + } else { \ + void *_hs_iter = (head); \ + (add)->hh.tbl = (head)->hh.tbl; \ + HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ + if (_hs_iter) { \ + (add)->hh.next = _hs_iter; \ + if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \ + HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \ + } else { \ + (head) = (add); \ + } \ + HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \ + } else { \ + HASH_APPEND_LIST(hh, head, add); \ + } \ + } \ + HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ + HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \ +} while (0) + +#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ +do { \ + unsigned _hs_hashv; \ + HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ +} while (0) + +#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ + HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) + +#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ + HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) + +#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ +do { \ + IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ + (add)->hh.hashv = (hashval); \ + (add)->hh.key = (const void*) (keyptr); \ + (add)->hh.keylen = (unsigned) (keylen_in); \ + if (!(head)) { \ + (add)->hh.next = NULL; \ + (add)->hh.prev = NULL; \ + HASH_MAKE_TABLE(hh, add, _ha_oomed); \ + IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ + (head) = (add); \ + IF_HASH_NONFATAL_OOM( } ) \ + } else { \ + (add)->hh.tbl = (head)->hh.tbl; \ + HASH_APPEND_LIST(hh, head, add); \ + } \ + HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ + HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \ +} 
while (0) + +#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ +do { \ + unsigned _ha_hashv; \ + HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ +} while (0) + +#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ + HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) + +#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ + HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) + +#define HASH_TO_BKT(hashv,num_bkts,bkt) \ +do { \ + bkt = ((hashv) & ((num_bkts) - 1U)); \ +} while (0) + +/* delete "delptr" from the hash table. + * "the usual" patch-up process for the app-order doubly-linked-list. + * The use of _hd_hh_del below deserves special explanation. + * These used to be expressed using (delptr) but that led to a bug + * if someone used the same symbol for the head and deletee, like + * HASH_DELETE(hh,users,users); + * We want that to work, but by changing the head (users) below + * we were forfeiting our ability to further refer to the deletee (users) + * in the patch-up process. Solution: use scratch space to + * copy the deletee pointer, then the latter references are via that + * scratch pointer rather than through the repointed (users) symbol. 
+ */ +#define HASH_DELETE(hh,head,delptr) \ + HASH_DELETE_HH(hh, head, &(delptr)->hh) + +#define HASH_DELETE_HH(hh,head,delptrhh) \ +do { \ + const struct UT_hash_handle *_hd_hh_del = (delptrhh); \ + if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ + uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + (head) = NULL; \ + } else { \ + unsigned _hd_bkt; \ + if (_hd_hh_del == (head)->hh.tbl->tail) { \ + (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \ + } \ + if (_hd_hh_del->prev != NULL) { \ + HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \ + } else { \ + DECLTYPE_ASSIGN(head, _hd_hh_del->next); \ + } \ + if (_hd_hh_del->next != NULL) { \ + HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \ + } \ + HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ + HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ + (head)->hh.tbl->num_items--; \ + } \ + HASH_FSCK(hh, head, "HASH_DELETE_HH"); \ +} while (0) + +/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ +#define HASH_FIND_STR(head,findstr,out) \ +do { \ + unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \ + HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \ +} while (0) +#define HASH_ADD_STR(head,strfield,add) \ +do { \ + unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \ + HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \ +} while (0) +#define HASH_REPLACE_STR(head,strfield,add,replaced) \ +do { \ + unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \ + HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \ +} while (0) +#define HASH_FIND_INT(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int),out) +#define HASH_ADD_INT(head,intfield,add) 
\ + HASH_ADD(hh,head,intfield,sizeof(int),add) +#define HASH_REPLACE_INT(head,intfield,add,replaced) \ + HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) +#define HASH_FIND_I64(head,findint,out) \ + HASH_FIND(hh,head,findint,sizeof(int64_t),out) +#define HASH_ADD_I64(head,intfield,add) \ + HASH_ADD(hh,head,intfield,sizeof(int64_t),add) +#define HASH_FIND_PTR(head,findptr,out) \ + HASH_FIND(hh,head,findptr,sizeof(void *),out) +#define HASH_ADD_PTR(head,ptrfield,add) \ + HASH_ADD(hh,head,ptrfield,sizeof(void *),add) +#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ + HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) +#define HASH_DEL(head,delptr) \ + HASH_DELETE(hh,head,delptr) + +/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. + * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. + */ +#ifdef HASH_DEBUG +#include <stdio.h> /* fprintf, stderr */ +#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0) +#define HASH_FSCK(hh,head,where) \ +do { \ + struct UT_hash_handle *_thh; \ + if (head) { \ + unsigned _bkt_i; \ + unsigned _count = 0; \ + char *_prev; \ + for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \ + unsigned _bkt_count = 0; \ + _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ + _prev = NULL; \ + while (_thh) { \ + if (_prev != (char*)(_thh->hh_prev)) { \ + HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \ + (where), (void*)_thh->hh_prev, (void*)_prev); \ + } \ + _bkt_count++; \ + _prev = (char*)(_thh); \ + _thh = _thh->hh_next; \ + } \ + _count += _bkt_count; \ + if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ + HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \ + (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ + } \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \ + (where), (head)->hh.tbl->num_items, _count); \ + } \ + _count = 0; \ + _prev = 
NULL; \ + _thh = &(head)->hh; \ + while (_thh) { \ + _count++; \ + if (_prev != (char*)_thh->prev) { \ + HASH_OOPS("%s: invalid prev %p, actual %p\n", \ + (where), (void*)_thh->prev, (void*)_prev); \ + } \ + _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ + _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \ + } \ + if (_count != (head)->hh.tbl->num_items) { \ + HASH_OOPS("%s: invalid app item count %u, actual %u\n", \ + (where), (head)->hh.tbl->num_items, _count); \ + } \ + } \ +} while (0) +#else +#define HASH_FSCK(hh,head,where) +#endif + +/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to + * the descriptor to which this macro is defined for tuning the hash function. + * The app can #include <unistd.h> to get the prototype for write(2). */ +#ifdef HASH_EMIT_KEYS +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ +do { \ + unsigned _klen = fieldlen; \ + write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ + write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ +} while (0) +#else +#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) +#endif + +/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. 
*/ +#define HASH_BER(key,keylen,hashv) \ +do { \ + unsigned _hb_keylen = (unsigned)keylen; \ + const unsigned char *_hb_key = (const unsigned char*)(key); \ + (hashv) = 0; \ + while (_hb_keylen-- != 0U) { \ + (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ + } \ +} while (0) + + +/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at + * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx + * (archive link: https://archive.is/Ivcan ) + */ +#define HASH_SAX(key,keylen,hashv) \ +do { \ + unsigned _sx_i; \ + const unsigned char *_hs_key = (const unsigned char*)(key); \ + hashv = 0; \ + for (_sx_i=0; _sx_i < keylen; _sx_i++) { \ + hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ + } \ +} while (0) +/* FNV-1a variation */ +#define HASH_FNV(key,keylen,hashv) \ +do { \ + unsigned _fn_i; \ + const unsigned char *_hf_key = (const unsigned char*)(key); \ + (hashv) = 2166136261U; \ + for (_fn_i=0; _fn_i < keylen; _fn_i++) { \ + hashv = hashv ^ _hf_key[_fn_i]; \ + hashv = hashv * 16777619U; \ + } \ +} while (0) + +#define HASH_OAT(key,keylen,hashv) \ +do { \ + unsigned _ho_i; \ + const unsigned char *_ho_key=(const unsigned char*)(key); \ + hashv = 0; \ + for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ + hashv += _ho_key[_ho_i]; \ + hashv += (hashv << 10); \ + hashv ^= (hashv >> 6); \ + } \ + hashv += (hashv << 3); \ + hashv ^= (hashv >> 11); \ + hashv += (hashv << 15); \ +} while (0) + +#define HASH_JEN_MIX(a,b,c) \ +do { \ + a -= b; a -= c; a ^= ( c >> 13 ); \ + b -= c; b -= a; b ^= ( a << 8 ); \ + c -= a; c -= b; c ^= ( b >> 13 ); \ + a -= b; a -= c; a ^= ( c >> 12 ); \ + b -= c; b -= a; b ^= ( a << 16 ); \ + c -= a; c -= b; c ^= ( b >> 5 ); \ + a -= b; a -= c; a ^= ( c >> 3 ); \ + b -= c; b -= a; b ^= ( a << 10 ); \ + c -= a; c -= b; c ^= ( b >> 15 ); \ +} while (0) + +#define HASH_JEN(key,keylen,hashv) \ +do { \ + unsigned _hj_i,_hj_j,_hj_k; \ + unsigned const char *_hj_key=(unsigned const char*)(key); \ + hashv = 0xfeedbeefu; \ + 
_hj_i = _hj_j = 0x9e3779b9u; \ + _hj_k = (unsigned)(keylen); \ + while (_hj_k >= 12U) { \ + _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ + + ( (unsigned)_hj_key[2] << 16 ) \ + + ( (unsigned)_hj_key[3] << 24 ) ); \ + _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ + + ( (unsigned)_hj_key[6] << 16 ) \ + + ( (unsigned)_hj_key[7] << 24 ) ); \ + hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ + + ( (unsigned)_hj_key[10] << 16 ) \ + + ( (unsigned)_hj_key[11] << 24 ) ); \ + \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ + \ + _hj_key += 12; \ + _hj_k -= 12U; \ + } \ + hashv += (unsigned)(keylen); \ + switch ( _hj_k ) { \ + case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ + case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ + case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ + case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ + case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ + case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ + case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ + case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ + case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ + case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ + case 1: _hj_i += _hj_key[0]; /* FALLTHROUGH */ \ + default: ; \ + } \ + HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ +} while (0) + +/* The Paul Hsieh hash function */ +#undef get16bits +#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ + || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) +#define get16bits(d) (*((const uint16_t *) (d))) +#endif + +#if !defined (get16bits) +#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ + +(uint32_t)(((const uint8_t *)(d))[0]) ) +#endif +#define HASH_SFH(key,keylen,hashv) \ +do { \ + unsigned const char *_sfh_key=(unsigned const char*)(key); \ + uint32_t _sfh_tmp, 
_sfh_len = (uint32_t)keylen; \ + \ + unsigned _sfh_rem = _sfh_len & 3U; \ + _sfh_len >>= 2; \ + hashv = 0xcafebabeu; \ + \ + /* Main loop */ \ + for (;_sfh_len > 0U; _sfh_len--) { \ + hashv += get16bits (_sfh_key); \ + _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ + hashv = (hashv << 16) ^ _sfh_tmp; \ + _sfh_key += 2U*sizeof (uint16_t); \ + hashv += hashv >> 11; \ + } \ + \ + /* Handle end cases */ \ + switch (_sfh_rem) { \ + case 3: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 16; \ + hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ + hashv += hashv >> 11; \ + break; \ + case 2: hashv += get16bits (_sfh_key); \ + hashv ^= hashv << 11; \ + hashv += hashv >> 17; \ + break; \ + case 1: hashv += *_sfh_key; \ + hashv ^= hashv << 10; \ + hashv += hashv >> 1; \ + break; \ + default: ; \ + } \ + \ + /* Force "avalanching" of final 127 bits */ \ + hashv ^= hashv << 3; \ + hashv += hashv >> 5; \ + hashv ^= hashv << 4; \ + hashv += hashv >> 17; \ + hashv ^= hashv << 25; \ + hashv += hashv >> 6; \ +} while (0) + +/* iterate over items in a known bucket to find desired item */ +#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ +do { \ + if ((head).hh_head != NULL) { \ + DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ + } else { \ + (out) = NULL; \ + } \ + while ((out) != NULL) { \ + if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \ + if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \ + break; \ + } \ + } \ + if ((out)->hh.hh_next != NULL) { \ + DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \ + } else { \ + (out) = NULL; \ + } \ + } \ +} while (0) + +/* add an item to a bucket */ +#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \ +do { \ + UT_hash_bucket *_ha_head = &(head); \ + _ha_head->count++; \ + (addhh)->hh_next = _ha_head->hh_head; \ + (addhh)->hh_prev = NULL; \ + if (_ha_head->hh_head != NULL) { \ + _ha_head->hh_head->hh_prev = (addhh); \ + } \ + 
_ha_head->hh_head = (addhh); \ + if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \ + && !(addhh)->tbl->noexpand) { \ + HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (oomed) { \ + HASH_DEL_IN_BKT(head,addhh); \ + } \ + ) \ + } \ +} while (0) + +/* remove an item from a given bucket */ +#define HASH_DEL_IN_BKT(head,delhh) \ +do { \ + UT_hash_bucket *_hd_head = &(head); \ + _hd_head->count--; \ + if (_hd_head->hh_head == (delhh)) { \ + _hd_head->hh_head = (delhh)->hh_next; \ + } \ + if ((delhh)->hh_prev) { \ + (delhh)->hh_prev->hh_next = (delhh)->hh_next; \ + } \ + if ((delhh)->hh_next) { \ + (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \ + } \ +} while (0) + +/* Bucket expansion has the effect of doubling the number of buckets + * and redistributing the items into the new buckets. Ideally the + * items will distribute more or less evenly into the new buckets + * (the extent to which this is true is a measure of the quality of + * the hash function as it applies to the key domain). + * + * With the items distributed into more buckets, the chain length + * (item count) in each bucket is reduced. Thus by expanding buckets + * the hash keeps a bound on the chain length. This bounded chain + * length is the essence of how a hash provides constant time lookup. + * + * The calculation of tbl->ideal_chain_maxlen below deserves some + * explanation. First, keep in mind that we're calculating the ideal + * maximum chain length based on the *new* (doubled) bucket count. + * In fractions this is just n/b (n=number of items,b=new num buckets). + * Since the ideal chain length is an integer, we want to calculate + * ceil(n/b). We don't depend on floating point arithmetic in this + * hash, so to calculate ceil(n/b) with integers we could write + * + * ceil(n/b) = (n/b) + ((n%b)?1:0) + * + * and in fact a previous version of this hash did just that. 
+ * But now we have improved things a bit by recognizing that b is + * always a power of two. We keep its base 2 log handy (call it lb), + * so now we can write this with a bit shift and logical AND: + * + * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) + * + */ +#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \ +do { \ + unsigned _he_bkt; \ + unsigned _he_bkt_i; \ + struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ + UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ + _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ + sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ + if (!_he_new_buckets) { \ + HASH_RECORD_OOM(oomed); \ + } else { \ + uthash_bzero(_he_new_buckets, \ + sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ + (tbl)->ideal_chain_maxlen = \ + ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \ + ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ + (tbl)->nonideal_items = 0; \ + for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \ + _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \ + while (_he_thh != NULL) { \ + _he_hh_nxt = _he_thh->hh_next; \ + HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \ + _he_newbkt = &(_he_new_buckets[_he_bkt]); \ + if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \ + (tbl)->nonideal_items++; \ + if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \ + _he_newbkt->expand_mult++; \ + } \ + } \ + _he_thh->hh_prev = NULL; \ + _he_thh->hh_next = _he_newbkt->hh_head; \ + if (_he_newbkt->hh_head != NULL) { \ + _he_newbkt->hh_head->hh_prev = _he_thh; \ + } \ + _he_newbkt->hh_head = _he_thh; \ + _he_thh = _he_hh_nxt; \ + } \ + } \ + uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ + (tbl)->num_buckets *= 2U; \ + (tbl)->log2_num_buckets++; \ + (tbl)->buckets = _he_new_buckets; \ + (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? 
\ + ((tbl)->ineff_expands+1U) : 0U; \ + if ((tbl)->ineff_expands > 1U) { \ + (tbl)->noexpand = 1; \ + uthash_noexpand_fyi(tbl); \ + } \ + uthash_expand_fyi(tbl); \ + } \ +} while (0) + + +/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ +/* Note that HASH_SORT assumes the hash handle name to be hh. + * HASH_SRT was added to allow the hash handle name to be passed in. */ +#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) +#define HASH_SRT(hh,head,cmpfcn) \ +do { \ + unsigned _hs_i; \ + unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ + struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ + if (head != NULL) { \ + _hs_insize = 1; \ + _hs_looping = 1; \ + _hs_list = &((head)->hh); \ + while (_hs_looping != 0U) { \ + _hs_p = _hs_list; \ + _hs_list = NULL; \ + _hs_tail = NULL; \ + _hs_nmerges = 0; \ + while (_hs_p != NULL) { \ + _hs_nmerges++; \ + _hs_q = _hs_p; \ + _hs_psize = 0; \ + for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \ + _hs_psize++; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ + if (_hs_q == NULL) { \ + break; \ + } \ + } \ + _hs_qsize = _hs_insize; \ + while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \ + if (_hs_psize == 0U) { \ + _hs_e = _hs_q; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ + _hs_qsize--; \ + } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL) { \ + _hs_p = ((_hs_p->next != NULL) ? \ + HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ + } \ + _hs_psize--; \ + } else if ((cmpfcn( \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \ + DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \ + )) <= 0) { \ + _hs_e = _hs_p; \ + if (_hs_p != NULL) { \ + _hs_p = ((_hs_p->next != NULL) ? 
\ + HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ + } \ + _hs_psize--; \ + } else { \ + _hs_e = _hs_q; \ + _hs_q = ((_hs_q->next != NULL) ? \ + HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ + _hs_qsize--; \ + } \ + if ( _hs_tail != NULL ) { \ + _hs_tail->next = ((_hs_e != NULL) ? \ + ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \ + } else { \ + _hs_list = _hs_e; \ + } \ + if (_hs_e != NULL) { \ + _hs_e->prev = ((_hs_tail != NULL) ? \ + ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \ + } \ + _hs_tail = _hs_e; \ + } \ + _hs_p = _hs_q; \ + } \ + if (_hs_tail != NULL) { \ + _hs_tail->next = NULL; \ + } \ + if (_hs_nmerges <= 1U) { \ + _hs_looping = 0; \ + (head)->hh.tbl->tail = _hs_tail; \ + DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ + } \ + _hs_insize *= 2U; \ + } \ + HASH_FSCK(hh, head, "HASH_SRT"); \ + } \ +} while (0) + +/* This function selects items from one hash into another hash. + * The end result is that the selected items have dual presence + * in both hashes. There is no copy of the items made; rather + * they are added into the new hash through a secondary hash + * hash handle that must be present in the structure. 
*/ +#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ +do { \ + unsigned _src_bkt, _dst_bkt; \ + void *_last_elt = NULL, *_elt; \ + UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ + ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ + if ((src) != NULL) { \ + for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ + for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ + _src_hh != NULL; \ + _src_hh = _src_hh->hh_next) { \ + _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ + if (cond(_elt)) { \ + IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \ + _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \ + _dst_hh->key = _src_hh->key; \ + _dst_hh->keylen = _src_hh->keylen; \ + _dst_hh->hashv = _src_hh->hashv; \ + _dst_hh->prev = _last_elt; \ + _dst_hh->next = NULL; \ + if (_last_elt_hh != NULL) { \ + _last_elt_hh->next = _elt; \ + } \ + if ((dst) == NULL) { \ + DECLTYPE_ASSIGN(dst, _elt); \ + HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \ + IF_HASH_NONFATAL_OOM( \ + if (_hs_oomed) { \ + uthash_nonfatal_oom(_elt); \ + (dst) = NULL; \ + continue; \ + } \ + ) \ + } else { \ + _dst_hh->tbl = (dst)->hh_dst.tbl; \ + } \ + HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ + HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \ + (dst)->hh_dst.tbl->num_items++; \ + IF_HASH_NONFATAL_OOM( \ + if (_hs_oomed) { \ + HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \ + HASH_DELETE_HH(hh_dst, dst, _dst_hh); \ + _dst_hh->tbl = NULL; \ + uthash_nonfatal_oom(_elt); \ + continue; \ + } \ + ) \ + HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \ + _last_elt = _elt; \ + _last_elt_hh = _dst_hh; \ + } \ + } \ + } \ + } \ + HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \ +} while (0) + +#define HASH_CLEAR(hh,head) \ +do { \ + if ((head) != NULL) { \ + HASH_BLOOM_FREE((head)->hh.tbl); \ + uthash_free((head)->hh.tbl->buckets, \ + (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ + 
uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ + (head) = NULL; \ + } \ +} while (0) + +#define HASH_OVERHEAD(hh,head) \ + (((head) != NULL) ? ( \ + (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ + ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ + sizeof(UT_hash_table) + \ + (HASH_BLOOM_BYTELEN))) : 0U) + +#ifdef NO_DECLTYPE +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) +#else +#define HASH_ITER(hh,head,el,tmp) \ +for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ + (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) +#endif + +/* obtain a count of items in the hash */ +#define HASH_COUNT(head) HASH_CNT(hh,head) +#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) + +typedef struct UT_hash_bucket { + struct UT_hash_handle *hh_head; + unsigned count; + + /* expand_mult is normally set to 0. In this situation, the max chain length + * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If + * the bucket's chain exceeds this length, bucket expansion is triggered). + * However, setting expand_mult to a non-zero value delays bucket expansion + * (that would be triggered by additions to this particular bucket) + * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. + * (The multiplier is simply expand_mult+1). The whole idea of this + * multiplier is to reduce bucket expansions, since they are expensive, in + * situations where we know that a particular bucket tends to be overused. + * It is better to let its chain length grow to a longer yet-still-bounded + * value, than to do an O(n) bucket expansion too often. 
+ */ + unsigned expand_mult; + +} UT_hash_bucket; + +/* random signature used only to find hash tables in external analysis */ +#define HASH_SIGNATURE 0xa0111fe1u +#define HASH_BLOOM_SIGNATURE 0xb12220f2u + +typedef struct UT_hash_table { + UT_hash_bucket *buckets; + unsigned num_buckets, log2_num_buckets; + unsigned num_items; + struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ + ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ + + /* in an ideal situation (all buckets used equally), no bucket would have + * more than ceil(#items/#buckets) items. that's the ideal chain length. */ + unsigned ideal_chain_maxlen; + + /* nonideal_items is the number of items in the hash whose chain position + * exceeds the ideal chain maxlen. these items pay the penalty for an uneven + * hash distribution; reaching them in a chain traversal takes >ideal steps */ + unsigned nonideal_items; + + /* ineffective expands occur when a bucket doubling was performed, but + * afterward, more than half the items in the hash had nonideal chain + * positions. If this happens on two consecutive expansions we inhibit any + * further expansion, as it's not helping; this happens when the hash + * function isn't a good fit for the key domain. When expansion is inhibited + * the hash will still work, albeit no longer in constant time. 
*/ + unsigned ineff_expands, noexpand; + + uint32_t signature; /* used only to find hash tables in external analysis */ +#ifdef HASH_BLOOM + uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ + uint8_t *bloom_bv; + uint8_t bloom_nbits; +#endif + +} UT_hash_table; + +typedef struct UT_hash_handle { + struct UT_hash_table *tbl; + void *prev; /* prev element in app order */ + void *next; /* next element in app order */ + struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ + struct UT_hash_handle *hh_next; /* next hh in bucket order */ + const void *key; /* ptr to enclosing struct's key */ + unsigned keylen; /* enclosing struct's key len */ + unsigned hashv; /* result of hash-fcn(key) */ +} UT_hash_handle; + +#endif /* UTHASH_H */ diff --git a/solo-ckpool-source/src/utlist.h b/solo-ckpool-source/src/utlist.h new file mode 100644 index 0000000..48a8c7d --- /dev/null +++ b/solo-ckpool-source/src/utlist.h @@ -0,0 +1,757 @@ +/* +Copyright (c) 2007-2014, Troy D. Hanson http://troydhanson.github.com/uthash/ +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER +OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef UTLIST_H +#define UTLIST_H + +#define UTLIST_VERSION 1.9.9 + +#include <assert.h> + +/* + * This file contains macros to manipulate singly and doubly-linked lists. + * + * 1. LL_ macros: singly-linked lists. + * 2. DL_ macros: doubly-linked lists. + * 3. CDL_ macros: circular doubly-linked lists. + * + * To use singly-linked lists, your structure must have a "next" pointer. + * To use doubly-linked lists, your structure must have "prev" and "next" pointers. + * Either way, the pointer to the head of the list must be initialized to NULL. + * + * ----------------.EXAMPLE ------------------------- + * struct item { + * int id; + * struct item *prev, *next; + * } + * + * struct item *list = NULL; + * + * int main() { + * struct item *item; + * ... allocate and populate item ... + * DL_APPEND(list, item); + * } + * -------------------------------------------------- + * + * For doubly-linked lists, the append and delete macros are O(1) + * For singly-linked lists, append and delete are O(n) but prepend is O(1) + * The sort macro is O(n log(n)) for all types of single/double/circular lists. + */ + +/* These macros use decltype or the earlier __typeof GNU extension. + As decltype is only available in newer compilers (VS2010 or gcc 4.3+ + when compiling c++ code), this code uses whatever method is needed + or, for VS2008 where neither is available, uses casting workarounds. 
*/ +#ifdef _MSC_VER /* MS compiler */ +#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ +#define LDECLTYPE(x) decltype(x) +#else /* VS2008 or older (or VS2010 in C mode) */ +#define NO_DECLTYPE +#define LDECLTYPE(x) char* +#endif +#elif defined(__ICCARM__) +#define NO_DECLTYPE +#define LDECLTYPE(x) char* +#else /* GNU, Sun and other compilers */ +#define LDECLTYPE(x) __typeof(x) +#endif + +/* for VS2008 we use some workarounds to get around the lack of decltype, + * namely, we always reassign our tmp variable to the list head if we need + * to dereference its prev/next pointers, and save/restore the real head.*/ +#ifdef NO_DECLTYPE +#define _SV(elt,list) _tmp = (char*)(list); {char **_alias = (char**)&(list); *_alias = (elt); } +#define _NEXT(elt,list,next) ((char*)((list)->next)) +#define _NEXTASGN(elt,list,to,next) { char **_alias = (char**)&((list)->next); *_alias=(char*)(to); } +/* #define _PREV(elt,list,prev) ((char*)((list)->prev)) */ +#define _PREVASGN(elt,list,to,prev) { char **_alias = (char**)&((list)->prev); *_alias=(char*)(to); } +#define _RS(list) { char **_alias = (char**)&(list); *_alias=_tmp; } +#define _CASTASGN(a,b) { char **_alias = (char**)&(a); *_alias=(char*)(b); } +#else +#define _SV(elt,list) +#define _NEXT(elt,list,next) ((elt)->next) +#define _NEXTASGN(elt,list,to,next) ((elt)->next)=(to) +/* #define _PREV(elt,list,prev) ((elt)->prev) */ +#define _PREVASGN(elt,list,to,prev) ((elt)->prev)=(to) +#define _RS(list) +#define _CASTASGN(a,b) (a)=(b) +#endif + +/****************************************************************************** + * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort * + * Unwieldy variable names used here to avoid shadowing passed-in variables. 
* + *****************************************************************************/ +#define LL_SORT(list, cmp) \ + LL_SORT2(list, cmp, next) + +#define LL_SORT2(list, cmp, next) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + _CASTASGN(_ls_p,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list,next); _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL,next); _RS(list); \ + } \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } \ +} while (0) + + +#define DL_SORT(list, cmp) \ + DL_SORT2(list, cmp, prev, next) + +#define DL_SORT2(list, cmp, prev, next) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + 
int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + _CASTASGN(_ls_p,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list,next); _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail,prev); _RS(list); \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + _CASTASGN(list->prev, _ls_tail); \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL,next); _RS(list); \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } \ +} while (0) + +#define CDL_SORT(list, cmp) \ + CDL_SORT2(list, cmp, prev, next) + +#define CDL_SORT2(list, cmp, prev, next) \ +do { \ + LDECLTYPE(list) _ls_p; \ + LDECLTYPE(list) _ls_q; \ + LDECLTYPE(list) _ls_e; \ + LDECLTYPE(list) _ls_tail; \ + LDECLTYPE(list) _ls_oldhead; \ + LDECLTYPE(list) _tmp; \ + int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ + if (list) { \ + _ls_insize = 1; \ + _ls_looping = 1; \ + while (_ls_looping) { \ + 
_CASTASGN(_ls_p,list); \ + _CASTASGN(_ls_oldhead,list); \ + list = NULL; \ + _ls_tail = NULL; \ + _ls_nmerges = 0; \ + while (_ls_p) { \ + _ls_nmerges++; \ + _ls_q = _ls_p; \ + _ls_psize = 0; \ + for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ + _ls_psize++; \ + _SV(_ls_q,list); \ + if (_NEXT(_ls_q,list,next) == _ls_oldhead) { \ + _ls_q = NULL; \ + } else { \ + _ls_q = _NEXT(_ls_q,list,next); \ + } \ + _RS(list); \ + if (!_ls_q) break; \ + } \ + _ls_qsize = _ls_insize; \ + while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ + if (_ls_psize == 0) { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ + } else if (_ls_qsize == 0 || !_ls_q) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ + } else if (cmp(_ls_p,_ls_q) <= 0) { \ + _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ + _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ + if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ + } else { \ + _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ + _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ + if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ + } \ + if (_ls_tail) { \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ + } else { \ + _CASTASGN(list,_ls_e); \ + } \ + _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail,prev); _RS(list); \ + _ls_tail = _ls_e; \ + } \ + _ls_p = _ls_q; \ + } \ + _CASTASGN(list->prev,_ls_tail); \ + _CASTASGN(_tmp,list); \ + _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_tmp,next); _RS(list); \ + if (_ls_nmerges <= 1) { \ + _ls_looping=0; \ + } \ + _ls_insize *= 2; \ + } \ + } \ +} while (0) + +/****************************************************************************** + * singly linked list macros (non-circular) * + *****************************************************************************/ +#define LL_PREPEND(head,add) \ + 
LL_PREPEND2(head,add,next) + +#define LL_PREPEND2(head,add,next) \ +do { \ + (add)->next = head; \ + head = add; \ +} while (0) + +#define LL_CONCAT(head1,head2) \ + LL_CONCAT2(head1,head2,next) + +#define LL_CONCAT2(head1,head2,next) \ +do { \ + LDECLTYPE(head1) _tmp; \ + if (head1) { \ + _tmp = head1; \ + while (_tmp->next) { _tmp = _tmp->next; } \ + _tmp->next=(head2); \ + } else { \ + (head1)=(head2); \ + } \ +} while (0) + +#define LL_APPEND(head,add) \ + LL_APPEND2(head,add,next) + +#define LL_APPEND2(head,add,next) \ +do { \ + LDECLTYPE(head) _tmp; \ + (add)->next=NULL; \ + if (head) { \ + _tmp = head; \ + while (_tmp->next) { _tmp = _tmp->next; } \ + _tmp->next=(add); \ + } else { \ + (head)=(add); \ + } \ +} while (0) + +#define LL_DELETE(head,del) \ + LL_DELETE2(head,del,next) + +#define LL_DELETE2(head,del,next) \ +do { \ + LDECLTYPE(head) _tmp; \ + if ((head) == (del)) { \ + (head)=(head)->next; \ + } else { \ + _tmp = head; \ + while (_tmp->next && (_tmp->next != (del))) { \ + _tmp = _tmp->next; \ + } \ + if (_tmp->next) { \ + _tmp->next = ((del)->next); \ + } \ + } \ +} while (0) + +/* Here are VS2008 replacements for LL_APPEND and LL_DELETE */ +#define LL_APPEND_VS2008(head,add) \ + LL_APPEND2_VS2008(head,add,next) + +#define LL_APPEND2_VS2008(head,add,next) \ +do { \ + if (head) { \ + (add)->next = head; /* use add->next as a temp variable */ \ + while ((add)->next->next) { (add)->next = (add)->next->next; } \ + (add)->next->next=(add); \ + } else { \ + (head)=(add); \ + } \ + (add)->next=NULL; \ +} while (0) + +#define LL_DELETE_VS2008(head,del) \ + LL_DELETE2_VS2008(head,del,next) + +#define LL_DELETE2_VS2008(head,del,next) \ +do { \ + if ((head) == (del)) { \ + (head)=(head)->next; \ + } else { \ + char *_tmp = (char*)(head); \ + while ((head)->next && ((head)->next != (del))) { \ + head = (head)->next; \ + } \ + if ((head)->next) { \ + (head)->next = ((del)->next); \ + } \ + { \ + char **_head_alias = (char**)&(head); \ + *_head_alias = _tmp; \ 
+ } \ + } \ +} while (0) +#ifdef NO_DECLTYPE +#undef LL_APPEND +#define LL_APPEND LL_APPEND_VS2008 +#undef LL_DELETE +#define LL_DELETE LL_DELETE_VS2008 +#undef LL_DELETE2 +#define LL_DELETE2 LL_DELETE2_VS2008 +#undef LL_APPEND2 +#define LL_APPEND2 LL_APPEND2_VS2008 +#undef LL_CONCAT /* no LL_CONCAT_VS2008 */ +#undef DL_CONCAT /* no DL_CONCAT_VS2008 */ +#endif +/* end VS2008 replacements */ + +#define LL_COUNT(head,el,counter) \ + LL_COUNT2(head,el,counter,next) \ + +#define LL_COUNT2(head,el,counter,next) \ +{ \ + counter = 0; \ + LL_FOREACH2(head,el,next){ ++counter; } \ +} + +#define LL_FOREACH(head,el) \ + LL_FOREACH2(head,el,next) + +#define LL_FOREACH2(head,el,next) \ + for(el=head;el;el=(el)->next) + +#define LL_FOREACH_SAFE(head,el,tmp) \ + LL_FOREACH_SAFE2(head,el,tmp,next) + +#define LL_FOREACH_SAFE2(head,el,tmp,next) \ + for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) + +#define LL_SEARCH_SCALAR(head,out,field,val) \ + LL_SEARCH_SCALAR2(head,out,field,val,next) + +#define LL_SEARCH_SCALAR2(head,out,field,val,next) \ +do { \ + LL_FOREACH2(head,out,next) { \ + if ((out)->field == (val)) break; \ + } \ +} while(0) + +#define LL_SEARCH(head,out,elt,cmp) \ + LL_SEARCH2(head,out,elt,cmp,next) + +#define LL_SEARCH2(head,out,elt,cmp,next) \ +do { \ + LL_FOREACH2(head,out,next) { \ + if ((cmp(out,elt))==0) break; \ + } \ +} while(0) + +#define LL_REPLACE_ELEM(head, el, add) \ +do { \ + LDECLTYPE(head) _tmp; \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + (add)->next = (el)->next; \ + if ((head) == (el)) { \ + (head) = (add); \ + } else { \ + _tmp = head; \ + while (_tmp->next && (_tmp->next != (el))) { \ + _tmp = _tmp->next; \ + } \ + if (_tmp->next) { \ + _tmp->next = (add); \ + } \ + } \ +} while (0) + +#define LL_PREPEND_ELEM(head, el, add) \ +do { \ + LDECLTYPE(head) _tmp; \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + (add)->next = (el); \ + if ((head) == (el)) { \ + (head) = (add); \ 
+ } else { \ + _tmp = head; \ + while (_tmp->next && (_tmp->next != (el))) { \ + _tmp = _tmp->next; \ + } \ + if (_tmp->next) { \ + _tmp->next = (add); \ + } \ + } \ +} while (0) \ + + +/****************************************************************************** + * doubly linked list macros (non-circular) * + *****************************************************************************/ +#define DL_PREPEND(head,add) \ + DL_PREPEND2(head,add,prev,next) + +#define DL_PREPEND2(head,add,prev,next) \ +do { \ + (add)->next = head; \ + if (head) { \ + (add)->prev = (head)->prev; \ + (head)->prev = (add); \ + } else { \ + (add)->prev = (add); \ + } \ + (head) = (add); \ +} while (0) + +#define DL_APPEND(head,add) \ + DL_APPEND2(head,add,prev,next) + +#define DL_APPEND2(head,add,prev,next) \ +do { \ + if (head) { \ + (add)->prev = (head)->prev; \ + (head)->prev->next = (add); \ + (head)->prev = (add); \ + (add)->next = NULL; \ + } else { \ + (head)=(add); \ + (head)->prev = (head); \ + (head)->next = NULL; \ + } \ +} while (0) + +#define DL_CONCAT(head1,head2) \ + DL_CONCAT2(head1,head2,prev,next) + +#define DL_CONCAT2(head1,head2,prev,next) \ +do { \ + LDECLTYPE(head1) _tmp; \ + if (head2) { \ + if (head1) { \ + _tmp = (head2)->prev; \ + (head2)->prev = (head1)->prev; \ + (head1)->prev->next = (head2); \ + (head1)->prev = _tmp; \ + } else { \ + (head1)=(head2); \ + } \ + } \ +} while (0) + +#define DL_DELETE(head,del) \ + DL_DELETE2(head,del,prev,next) + +#define DL_DELETE2(head,del,prev,next) \ +do { \ + assert((del)->prev != NULL); \ + if ((del)->prev == (del)) { \ + (head)=NULL; \ + } else if ((del)==(head)) { \ + (del)->next->prev = (del)->prev; \ + (head) = (del)->next; \ + } else { \ + (del)->prev->next = (del)->next; \ + if ((del)->next) { \ + (del)->next->prev = (del)->prev; \ + } else { \ + (head)->prev = (del)->prev; \ + } \ + } \ +} while (0) + +#define DL_COUNT(head,el,counter) \ + DL_COUNT2(head,el,counter,next) \ + +#define DL_COUNT2(head,el,counter,next) 
\ +{ \ + counter = 0; \ + DL_FOREACH2(head,el,next){ ++counter; } \ +} + +#define DL_FOREACH(head,el) \ + DL_FOREACH2(head,el,next) + +#define DL_FOREACH2(head,el,next) \ + for(el=head;el;el=(el)->next) + +/* this version is safe for deleting the elements during iteration */ +#define DL_FOREACH_SAFE(head,el,tmp) \ + DL_FOREACH_SAFE2(head,el,tmp,next) + +#define DL_FOREACH_SAFE2(head,el,tmp,next) \ + for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) + +/* these are identical to their singly-linked list counterparts */ +#define DL_SEARCH_SCALAR LL_SEARCH_SCALAR +#define DL_SEARCH LL_SEARCH +#define DL_SEARCH_SCALAR2 LL_SEARCH_SCALAR2 +#define DL_SEARCH2 LL_SEARCH2 + +#define DL_REPLACE_ELEM(head, el, add) \ +do { \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + if ((head) == (el)) { \ + (head) = (add); \ + (add)->next = (el)->next; \ + if ((el)->next == NULL) { \ + (add)->prev = (add); \ + } else { \ + (add)->prev = (el)->prev; \ + (add)->next->prev = (add); \ + } \ + } else { \ + (add)->next = (el)->next; \ + (add)->prev = (el)->prev; \ + (add)->prev->next = (add); \ + if ((el)->next == NULL) { \ + (head)->prev = (add); \ + } else { \ + (add)->next->prev = (add); \ + } \ + } \ +} while (0) + +#define DL_PREPEND_ELEM(head, el, add) \ +do { \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + (add)->next = (el); \ + (add)->prev = (el)->prev; \ + (el)->prev = (add); \ + if ((head) == (el)) { \ + (head) = (add); \ + } else { \ + (add)->prev->next = (add); \ + } \ +} while (0) \ + + +/****************************************************************************** + * circular doubly linked list macros * + *****************************************************************************/ +#define CDL_PREPEND(head,add) \ + CDL_PREPEND2(head,add,prev,next) + +#define CDL_PREPEND2(head,add,prev,next) \ +do { \ + if (head) { \ + (add)->prev = (head)->prev; \ + (add)->next = (head); \ + (head)->prev = (add); \ + 
(add)->prev->next = (add); \ + } else { \ + (add)->prev = (add); \ + (add)->next = (add); \ + } \ +(head)=(add); \ +} while (0) + +#define CDL_DELETE(head,del) \ + CDL_DELETE2(head,del,prev,next) + +#define CDL_DELETE2(head,del,prev,next) \ +do { \ + if ( ((head)==(del)) && ((head)->next == (head))) { \ + (head) = 0L; \ + } else { \ + (del)->next->prev = (del)->prev; \ + (del)->prev->next = (del)->next; \ + if ((del) == (head)) (head)=(del)->next; \ + } \ +} while (0) + +#define CDL_COUNT(head,el,counter) \ + CDL_COUNT2(head,el,counter,next) \ + +#define CDL_COUNT2(head, el, counter,next) \ +{ \ + counter = 0; \ + CDL_FOREACH2(head,el,next){ ++counter; } \ +} + +#define CDL_FOREACH(head,el) \ + CDL_FOREACH2(head,el,next) + +#define CDL_FOREACH2(head,el,next) \ + for(el=head;el;el=((el)->next==head ? 0L : (el)->next)) + +#define CDL_FOREACH_SAFE(head,el,tmp1,tmp2) \ + CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) + +#define CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) \ + for((el)=(head), ((tmp1)=(head)?((head)->prev):NULL); \ + (el) && ((tmp2)=(el)->next, 1); \ + ((el) = (((el)==(tmp1)) ? 
0L : (tmp2)))) + +#define CDL_SEARCH_SCALAR(head,out,field,val) \ + CDL_SEARCH_SCALAR2(head,out,field,val,next) + +#define CDL_SEARCH_SCALAR2(head,out,field,val,next) \ +do { \ + CDL_FOREACH2(head,out,next) { \ + if ((out)->field == (val)) break; \ + } \ +} while(0) + +#define CDL_SEARCH(head,out,elt,cmp) \ + CDL_SEARCH2(head,out,elt,cmp,next) + +#define CDL_SEARCH2(head,out,elt,cmp,next) \ +do { \ + CDL_FOREACH2(head,out,next) { \ + if ((cmp(out,elt))==0) break; \ + } \ +} while(0) + +#define CDL_REPLACE_ELEM(head, el, add) \ +do { \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + if ((el)->next == (el)) { \ + (add)->next = (add); \ + (add)->prev = (add); \ + (head) = (add); \ + } else { \ + (add)->next = (el)->next; \ + (add)->prev = (el)->prev; \ + (add)->next->prev = (add); \ + (add)->prev->next = (add); \ + if ((head) == (el)) { \ + (head) = (add); \ + } \ + } \ +} while (0) + +#define CDL_PREPEND_ELEM(head, el, add) \ +do { \ + assert(head != NULL); \ + assert(el != NULL); \ + assert(add != NULL); \ + (add)->next = (el); \ + (add)->prev = (el)->prev; \ + (el)->prev = (add); \ + (add)->prev->next = (add); \ + if ((head) == (el)) { \ + (head) = (add); \ + } \ +} while (0) \ + +#endif /* UTLIST_H */ + diff --git a/solo-ckpool.dockerfile b/solo-ckpool.dockerfile new file mode 100644 index 0000000..69acac8 --- /dev/null +++ b/solo-ckpool.dockerfile @@ -0,0 +1,65 @@ +############################ +# Docker build environment # +############################ +FROM ubuntu:22.04 AS build + +# Install build dependencies +RUN apt-get update || true && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + yasm \ + libzmq3-dev \ + git \ + autotools-dev \ + autoconf \ + automake \ + pkg-config \ + libtool \ + && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Copy pre-cloned solo-ckpool source +WORKDIR /build +COPY solo-ckpool-source/ ckpool-solo/ + +WORKDIR /build/ckpool-solo + 
+# Build ckpool-solo +RUN ./autogen.sh && \ + ./configure && \ + make + +############################ +# Docker runtime environment # +############################ +FROM ubuntu:22.04 + +# Install runtime dependencies +RUN apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + libzmq5 \ + iproute2 \ + iputils-ping \ + curl \ + jq \ + && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +# Copy built binaries +COPY --from=build /build/ckpool-solo/src/ckpool /usr/local/bin/ +COPY --from=build /build/ckpool-solo/src/ckpmsg /usr/local/bin/ + +# Copy the monitoring script and startup script +COPY ./pools-latency-calculator/monitor_and_apply_latency.sh /usr/local/bin/monitor_and_apply_latency.sh +COPY ./start-ckpool.sh /usr/local/bin/start-ckpool.sh +RUN chmod +x /usr/local/bin/monitor_and_apply_latency.sh /usr/local/bin/start-ckpool.sh + +# Create required directories +RUN mkdir -p /var/log/ckpool /etc/ckpool + +# Set working directory +WORKDIR /etc/ckpool + +# Expose stratum port +EXPOSE 3333 + +# Default command - will be overridden by docker-compose +CMD ["/usr/local/bin/ckpool", "-B", "-c", "/etc/ckpool/ckpool.conf"] \ No newline at end of file diff --git a/start-ckpool.sh b/start-ckpool.sh new file mode 100755 index 0000000..d969913 --- /dev/null +++ b/start-ckpool.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Clean up any leftover PID files +rm -f /var/log/ckpool/*.pid /var/log/ckpool/*.sock + +# Start latency monitoring in background +/usr/local/bin/monitor_and_apply_latency.sh 10.5.0.25 2 & + +# Start ckpool in foreground +exec /usr/local/bin/ckpool -B -c /etc/ckpool/ckpool.conf \ No newline at end of file From 88b1b661204d00c2df245dd7ac07a5ca720bee6b Mon Sep 17 00:00:00 2001 From: xyephy Date: Fri, 27 Jun 2025 15:06:52 +0300 Subject: [PATCH 2/4] Update integration to use GitHub repository as source - Modified solo-ckpool.dockerfile to clone from https://github.com/xyephy/solo-ckpool - Removed local 
solo-ckpool-source directory to keep PR focused - Updated documentation to reference GitHub repository - Maintains all fractional difficulty functionality while reducing PR size --- SOLO_CKPOOL_MODIFICATIONS.md | 2 +- solo-ckpool-source/.gitignore | 43 - solo-ckpool-source/AUTHORS | 7 - solo-ckpool-source/COPYING | 674 -- solo-ckpool-source/ChangeLog | 4 - solo-ckpool-source/Makefile.am | 3 - solo-ckpool-source/NEWS | 0 solo-ckpool-source/README | 334 - solo-ckpool-source/README-SOLOMINING | 168 - solo-ckpool-source/autogen.sh | 2 - solo-ckpool-source/cknode.conf | 34 - solo-ckpool-source/ckpassthrough.conf | 15 - solo-ckpool-source/ckpool.conf | 42 - solo-ckpool-source/ckproxy.conf | 24 - solo-ckpool-source/ckredirector.conf | 23 - solo-ckpool-source/configure.ac | 98 - solo-ckpool-source/m4/.gitignore | 30 - solo-ckpool-source/src/Makefile.am | 42 - solo-ckpool-source/src/bitcoin.c | 424 - solo-ckpool-source/src/bitcoin.h | 27 - solo-ckpool-source/src/ckpmsg.c | 330 - solo-ckpool-source/src/ckpool.c | 1904 ---- solo-ckpool-source/src/ckpool.h | 404 - solo-ckpool-source/src/connector.c | 1667 ---- solo-ckpool-source/src/connector.h | 20 - solo-ckpool-source/src/generator.c | 3422 ------- solo-ckpool-source/src/generator.h | 30 - solo-ckpool-source/src/jansson-2.14/CHANGES | 986 -- .../src/jansson-2.14/CMakeLists.txt | 662 -- solo-ckpool-source/src/jansson-2.14/LICENSE | 19 - .../src/jansson-2.14/Makefile.am | 13 - .../src/jansson-2.14/README.rst | 81 - .../src/jansson-2.14/android/jansson_config.h | 43 - .../cmake/CheckFunctionKeywords.cmake | 15 - .../src/jansson-2.14/cmake/CodeCoverage.cmake | 163 - .../src/jansson-2.14/cmake/FindSphinx.cmake | 315 - .../jansson-2.14/cmake/janssonConfig.cmake.in | 4 - .../jansson-2.14/cmake/jansson_config.h.cmake | 74 - .../cmake/jansson_private_config.h.cmake | 53 - .../src/jansson-2.14/configure.ac | 168 - .../src/jansson-2.14/doc/Makefile.am | 20 - .../src/jansson-2.14/doc/README | 5 - .../src/jansson-2.14/doc/apiref.rst | 
2064 ---- .../src/jansson-2.14/doc/changes.rst | 5 - .../src/jansson-2.14/doc/conf.py | 217 - .../src/jansson-2.14/doc/conformance.rst | 119 - .../src/jansson-2.14/doc/ext/refcounting.py | 69 - .../src/jansson-2.14/doc/gettingstarted.rst | 264 - .../src/jansson-2.14/doc/github_commits.c | 180 - .../src/jansson-2.14/doc/index.rst | 53 - .../src/jansson-2.14/doc/threadsafety.rst | 82 - .../src/jansson-2.14/doc/tutorial.rst | 288 - .../src/jansson-2.14/doc/upgrading.rst | 76 - .../src/jansson-2.14/examples/README.rst | 4 - .../src/jansson-2.14/examples/simple_parse.c | 200 - .../src/jansson-2.14/jansson.pc.in | 10 - .../jansson-2.14/jansson_private_config.h.in | 160 - .../src/jansson-2.14/scripts/clang-format | 3 - .../jansson-2.14/scripts/clang-format-check | 27 - .../src/jansson-2.14/src/Makefile.am | 30 - .../src/jansson-2.14/src/dump.c | 492 - .../src/jansson-2.14/src/error.c | 59 - .../src/jansson-2.14/src/hashtable.c | 340 - .../src/jansson-2.14/src/hashtable.h | 186 - .../src/jansson-2.14/src/hashtable_seed.c | 277 - .../src/jansson-2.14/src/jansson.def | 83 - .../src/jansson-2.14/src/jansson.h | 422 - .../src/jansson-2.14/src/jansson_config.h.in | 51 - .../src/jansson-2.14/src/jansson_private.h | 118 - .../src/jansson-2.14/src/load.c | 1106 --- .../src/jansson-2.14/src/lookup3.h | 382 - .../src/jansson-2.14/src/memory.c | 81 - .../src/jansson-2.14/src/pack_unpack.c | 937 -- .../src/jansson-2.14/src/strbuffer.c | 103 - .../src/jansson-2.14/src/strbuffer.h | 35 - .../src/jansson-2.14/src/strconv.c | 132 - solo-ckpool-source/src/jansson-2.14/src/utf.c | 163 - solo-ckpool-source/src/jansson-2.14/src/utf.h | 29 - .../src/jansson-2.14/src/value.c | 1112 --- .../src/jansson-2.14/src/version.c | 28 - .../src/jansson-2.14/test-driver | 153 - solo-ckpool-source/src/libckpool.c | 2258 ----- solo-ckpool-source/src/libckpool.h | 616 -- solo-ckpool-source/src/notifier.c | 63 - solo-ckpool-source/src/sha2.c | 236 - solo-ckpool-source/src/sha2.h | 69 - 
.../open_software_license.txt | 32 - .../src/sha256_code_release/sha256_avx1.asm | 588 -- .../sha256_code_release/sha256_avx2_rorx2.asm | 828 -- .../src/sha256_code_release/sha256_sse4.asm | 546 -- solo-ckpool-source/src/stratifier.c | 8617 ----------------- solo-ckpool-source/src/stratifier.h | 102 - solo-ckpool-source/src/uthash.h | 1144 --- solo-ckpool-source/src/utlist.h | 757 -- solo-ckpool.dockerfile | 5 +- 95 files changed, 4 insertions(+), 38391 deletions(-) delete mode 100644 solo-ckpool-source/.gitignore delete mode 100644 solo-ckpool-source/AUTHORS delete mode 100644 solo-ckpool-source/COPYING delete mode 100644 solo-ckpool-source/ChangeLog delete mode 100644 solo-ckpool-source/Makefile.am delete mode 100644 solo-ckpool-source/NEWS delete mode 100644 solo-ckpool-source/README delete mode 100644 solo-ckpool-source/README-SOLOMINING delete mode 100755 solo-ckpool-source/autogen.sh delete mode 100644 solo-ckpool-source/cknode.conf delete mode 100644 solo-ckpool-source/ckpassthrough.conf delete mode 100644 solo-ckpool-source/ckpool.conf delete mode 100644 solo-ckpool-source/ckproxy.conf delete mode 100644 solo-ckpool-source/ckredirector.conf delete mode 100644 solo-ckpool-source/configure.ac delete mode 100644 solo-ckpool-source/m4/.gitignore delete mode 100644 solo-ckpool-source/src/Makefile.am delete mode 100644 solo-ckpool-source/src/bitcoin.c delete mode 100644 solo-ckpool-source/src/bitcoin.h delete mode 100644 solo-ckpool-source/src/ckpmsg.c delete mode 100644 solo-ckpool-source/src/ckpool.c delete mode 100644 solo-ckpool-source/src/ckpool.h delete mode 100644 solo-ckpool-source/src/connector.c delete mode 100644 solo-ckpool-source/src/connector.h delete mode 100644 solo-ckpool-source/src/generator.c delete mode 100644 solo-ckpool-source/src/generator.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/CHANGES delete mode 100644 solo-ckpool-source/src/jansson-2.14/CMakeLists.txt delete mode 100644 solo-ckpool-source/src/jansson-2.14/LICENSE delete 
mode 100644 solo-ckpool-source/src/jansson-2.14/Makefile.am delete mode 100644 solo-ckpool-source/src/jansson-2.14/README.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/android/jansson_config.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake delete mode 100644 solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake delete mode 100644 solo-ckpool-source/src/jansson-2.14/configure.ac delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/Makefile.am delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/README delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/apiref.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/changes.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/conf.py delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/conformance.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/github_commits.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/index.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/examples/README.rst delete mode 100644 solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/jansson.pc.in delete mode 100644 
solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in delete mode 100755 solo-ckpool-source/src/jansson-2.14/scripts/clang-format delete mode 100755 solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/Makefile.am delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/dump.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/error.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson.def delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/jansson_private.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/load.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/lookup3.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/memory.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/strbuffer.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/strbuffer.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/strconv.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/utf.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/utf.h delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/value.c delete mode 100644 solo-ckpool-source/src/jansson-2.14/src/version.c delete mode 100755 solo-ckpool-source/src/jansson-2.14/test-driver delete mode 100644 solo-ckpool-source/src/libckpool.c delete mode 100644 solo-ckpool-source/src/libckpool.h delete mode 100644 solo-ckpool-source/src/notifier.c delete mode 100644 solo-ckpool-source/src/sha2.c delete mode 100644 
solo-ckpool-source/src/sha2.h delete mode 100644 solo-ckpool-source/src/sha256_code_release/open_software_license.txt delete mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm delete mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm delete mode 100644 solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm delete mode 100644 solo-ckpool-source/src/stratifier.c delete mode 100644 solo-ckpool-source/src/stratifier.h delete mode 100644 solo-ckpool-source/src/uthash.h delete mode 100644 solo-ckpool-source/src/utlist.h diff --git a/SOLO_CKPOOL_MODIFICATIONS.md b/SOLO_CKPOOL_MODIFICATIONS.md index 3753623..1c5856e 100644 --- a/SOLO_CKPOOL_MODIFICATIONS.md +++ b/SOLO_CKPOOL_MODIFICATIONS.md @@ -34,7 +34,7 @@ Solo-ckpool has been successfully integrated into the benchmarking tool to provi ### Integration Components -1. **solo-ckpool.dockerfile**: Builds solo-ckpool from source +1. **solo-ckpool.dockerfile**: Builds solo-ckpool from GitHub source (https://github.com/xyephy/solo-ckpool) 2. **solo-ckpool service**: Runs ckpool in BTCSOLO mode (-B flag) 3. **solo-ckpool-miner-proxy**: Monitors traffic and collects metrics 4. 
**Configuration files**: Pool settings and Bitcoin daemon connection diff --git a/solo-ckpool-source/.gitignore b/solo-ckpool-source/.gitignore deleted file mode 100644 index e53c249..0000000 --- a/solo-ckpool-source/.gitignore +++ /dev/null @@ -1,43 +0,0 @@ -*.o -*.bin -*.la -*.lo - -autom4te.cache -.deps - -Makefile -Makefile.in -INSTALL -aclocal.m4 -configure -depcomp -missing -install-sh -stamp-h1 -compile -config.log -config.status -config.guess -config.sub - -*~ - -ext_deps -config.h.in -config.h - -mkinstalldirs - -*.swp -src/ckpool -src/ckpmsg -ltmain.sh - -*.m4 - -.libs/ - -libtool - - diff --git a/solo-ckpool-source/AUTHORS b/solo-ckpool-source/AUTHORS deleted file mode 100644 index 4944848..0000000 --- a/solo-ckpool-source/AUTHORS +++ /dev/null @@ -1,7 +0,0 @@ -Con Kolivas -Core project lead, maintainer, author of ckpool and libckpool. -14BMjogz69qe8hk9thyzbmR5pg34mVKB1e - -Andrew Smith -Maintainer and author of ckdb. -1Jjk2LmktEQKnv8r2cZ9MvLiZwZ9gxabKm diff --git a/solo-ckpool-source/COPYING b/solo-ckpool-source/COPYING deleted file mode 100644 index 94a9ed0..0000000 --- a/solo-ckpool-source/COPYING +++ /dev/null @@ -1,674 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. 
You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. 
Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. 
- - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. 
- - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. 
Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. 
- - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. 
If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. 
- - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. 
- - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the 
material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. 
- - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. 
If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. 
- - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. 
The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. 
diff --git a/solo-ckpool-source/ChangeLog b/solo-ckpool-source/ChangeLog deleted file mode 100644 index 23ff63f..0000000 --- a/solo-ckpool-source/ChangeLog +++ /dev/null @@ -1,4 +0,0 @@ -See git repository ('git log') for full changelog. - -Git repository can be found at: -https://bitbucket.org/ckolivas/ckpool-solo diff --git a/solo-ckpool-source/Makefile.am b/solo-ckpool-source/Makefile.am deleted file mode 100644 index 126dcda..0000000 --- a/solo-ckpool-source/Makefile.am +++ /dev/null @@ -1,3 +0,0 @@ -ACLOCAL_AMFLAGS = -I m4 -SUBDIRS = src -EXTRA_DIST = ckpool.conf ckproxy.conf README README-SOLOMINING diff --git a/solo-ckpool-source/NEWS b/solo-ckpool-source/NEWS deleted file mode 100644 index e69de29..0000000 diff --git a/solo-ckpool-source/README b/solo-ckpool-source/README deleted file mode 100644 index 154361e..0000000 --- a/solo-ckpool-source/README +++ /dev/null @@ -1,334 +0,0 @@ -CKPOOL + CKPROXY + libckpool by Con Kolivas - -Ultra low overhead massively scalable multi-process, multi-threaded modular -bitcoin mining pool, proxy, passthrough, and library in c for Linux. - -CKPOOL is code provided free of charge under the GPLv3 license but its development -is mostly paid for by commissioned funding, and the pool by default contributes -0.5% of solved blocks in pool mode to the development team. Please consider leaving -this contribution in the code if you are running it on a pool or contributing to the -authors listed in AUTHORS if you use this code to aid funding further development. - ---- -LICENSE: - -GNU Public license V3. See included COPYING for details. - - ---- -DESIGN: - -Architecture: - -- Low level hand coded architecture relying on minimal outside libraries beyond -basic glibc functions for maximum flexibility and minimal overhead that can be -built and deployed on any Linux installation. - -- Multiprocess+multithreaded design to scale to massive deployments and -capitalise on modern multicore/multithread CPU designs. 
- -- Minimal memory overhead. - -- Utilises ultra reliable unix sockets for communication with dependent -processes. - -- Modular code design to streamline further development. - -- Standalone library code that can be utilised independently of ckpool. - -- Same code can be deployed in many different modes designed to talk to each -other on the same machine, local lan or remote internet locations. - - ---- -Modes of deployment: - -- Simple pool. - -- Simple pool with per-user solo mining. - -- Simple proxy without the limitations of hashrate inherent in other proxy -solutions when talking to ckpool. - -- Passthrough node(s) that combine connections to a single socket which can -be used to scale to millions of clients and allow the main pool to be isolated -from direct communication with clients. - -- Library for use by other software. - - ---- -Features: - -- Bitcoind communication to unmodified bitcoind with multiple failover to local -or remote locations. - -- Local pool instance worker limited only by operating system resources and -can be made virtually limitless through use of multiple downstream passthrough -nodes. - -- Proxy and passthrough modes can set up multiple failover upstream pools. - -- Optional share logging. - -- Virtually seamless restarts for upgrades through socket handover from exiting -instances to new starting instance. - -- Configurable custom coinbase signature. - -- Configurable instant starting and minimum difficulty. - -- Rapid vardiff adjustment with stable unlimited maximum difficulty handling. - -- New work generation on block changes incorporate full bitcoind transaction -set without delay or requiring to send transactionless work to miners thereby -providing the best bitcoin network support and rewarding miners with the most -transaction fees. - -- Event driven communication based on communication readiness preventing -slow communicating clients from delaying low latency ones. - -- Stratum messaging system to running clients. 
- -- Accurate pool and per client statistics. - -- Multiple named instances can be run concurrently on the same machine. - - ---- -BUILDING: - -Building ckpool requires no dependencies outside of the basic build tools and -yasm on any linux installation. Recommended zmq notification support (ckpool -only) requires the zmq devel library installed. - - -Building with zmq (preferred build but not required for ckproxy): - -sudo apt-get install build-essential yasm libzmq3-dev - -./configure - -make - - -Basic build: - -sudo apt-get install build-essential yasm - -./configure - -make - - -Building from git also requires autoconf, automake, and pkgconf: - -sudo apt-get install build-essential yasm autoconf automake libtool libzmq3-dev pkgconf - -./autogen.sh - -./configure - -make - - -Binaries will be built in the src/ subdirectory. Binaries generated will be: - -ckpool - The main pool back end - -ckproxy - A link to ckpool that automatically starts it in proxy mode - -ckpmsg - An application for passing messages in libckpool format to ckpool - -notifier - An application designed to be run with bitcoind's -blocknotify to - notify ckpool of block changes. - - -Installation is NOT required and ckpool can be run directly from the directory -it's built in but it can be installed with: -sudo make install - - ---- -RUNNING: - -ckpool supports the following options: - --B | --btcsolo - --c CONFIG | --config CONFIG - --g GROUP | --group GROUP - --H | --handover - --h | --help - --k | --killold - --L | --log-shares - --l LOGLEVEL | --loglevel LOGLEVEL - --N | --node - --n NAME | --name NAME - --P | --passthrough - --p | --proxy - --R | --redirector - --s SOCKDIR | --sockdir SOCKDIR - --u | --userproxy - - --B will start ckpool in BTCSOLO mode, which is designed for solo mining. All -usernames connected must be valid bitcoin addresses, and 100% of the block -reward will go to the user solving the block, minus any donation set. 
- --c tells ckpool to override its default configuration filename and -load the specified one. If -c is not specified, ckpool looks for ckpool.conf, -in proxy mode it looks for ckproxy.conf, in passthrough mode for -ckpassthrough.conf and in redirector mode for ckredirector.conf - --g will start ckpool as the group ID specified. - --H will make ckpool attempt to receive a handover from a running incidence of -ckpool with the same name, taking its client listening socket and shutting it -down. - --h displays the above help - --k will make ckpool shut down an existing instance of ckpool with the same name, -killing it if need be. Otherwise ckpool will refuse to start if an instance of -the same name is already running. - --L will log per share information in the logs directory divided by block height -and then workbase. - --l will change the ckpool process name to that specified, allowing -multiple different named instances to be running. By default the variant -names are used: ckpool, ckproxy, ckpassthrough, ckredirector, cknode. - --P will start ckpool in passthrough proxy mode where it collates all incoming -connections and streams all information on a single connection to an upstream -pool specified in ckproxy.conf . Downstream users all retain their individual -presence on the master pool. Standalone mode is implied. - --p will start ckpool in proxy mode where it appears to be a local pool handling -clients as separate entities while presenting shares as a single user to the -upstream pool specified. Note that the upstream pool needs to be a ckpool for -it to scale to large hashrates. Standalone mode is Optional. - --R will start ckpool in a variant of passthrough mode. It is designed to be a -front end to filter out users that never contribute any shares. Once an -accepted share from the upstream pool is detected, it will issue a redirect to -one of the redirecturl entries in the configuration file. 
It will cycle over -entries if multiple exist, but try to keep all clients from the same IP -redirecting to the same pool. - --s tells ckpool which directory to place its own communication -sockets (/tmp by default) - --u Userproxy mode will start ckpool in proxy mode as per the -p option above, -but in addition it will accept username/passwords from the stratum connects -and try to open additional connections with those credentials to the upstream -pool specified in the configuration file and then reconnect miners to mine with -their chosen username/password to the upstream pool. - - -ckpmsg and notifier support the -n, -p and -s options - ---- -CONFIGURATION - -At least one bitcoind is mandatory in ckpool mode with the minimum requirements -of server, rpcuser and rpcpassword set. - -Ckpool takes a json encoded configuration file in ckpool.conf by default or -ckproxy.conf in proxy or passthrough mode unless specified with -c. Sample -configurations for ckpool and ckproxy are included with the source. Entries -after the valid json are ignored and the space there can be used for comments. -The options recognised are as follows: - - -"btcd" : This is an array of bitcoind(s) with the options url, auth and pass -which match the configured bitcoind. The optional boolean field notify tells -ckpool this btcd is using the notifier and does not need to be polled for block -changes. If no btcd is specified, ckpool will look for one on localhost:8332 -with the username "user" and password "pass". - -"proxy" : This is an array in the same format as btcd above but is used in -proxy and passthrough mode to set the upstream pool and is mandatory. - -"btcaddress" : This is the bitcoin address to try to generate blocks to. It is -ignored in BTCSOLO mode. - -"btcsig" : This is an optional signature to put into the coinbase of mined -blocks. - -"blockpoll" : This is the frequency in milliseconds for how often to check for -new network blocks and is 100 by default. 
It is intended to be a backup only -for when the notifier is not set up and only polls if the "notify" field is -not set on a btcd. - -"donation" : Optional percentage donation of block reward that goes to the -developer of ckpool to assist in further development and maintenance of the -code. Takes a floating point value and defaults to zero if not set. - -"nodeserver" : This takes the same format as the serverurl array and specifies -additional IPs/ports to bind to that will accept incoming requests for mining -node communications. It is recommended to selectively isolate this address -to minimise unnecessary communications with unauthorised nodes. - -"nonce1length" : This is optional allowing the extranonce1 length to be chosen -from 2 to 8. Default 4 - -"nonce2length" : This is optional allowing the extranonce2 length to be chosen -from 2 to 8. Default 8 - -"update_interval" : This is the frequency that stratum updates are sent out to -miners and is set to 30 seconds by default to help perpetuate transactions for -the health of the bitcoin network. - -"version_mask" : This is a mask of which bits in the version number it is valid -for a client to alter and is expressed as an hex string. Eg "00fff000" -Default is "1fffe000". - -"serverurl" : This is the IP(s) to try to bind ckpool uniquely to, otherwise it -will attempt to bind to all interfaces in port 3333 by default in pool mode -and 3334 in proxy mode. Multiple entries can be specified as an array by -either IP or resolvable domain name but the executable must be able to bind to -all of them and ports up to 1024 usually require privileged access. - -"redirecturl" : This is an array of URLs that ckpool will redirect active -miners to in redirector mode. They must be valid resolvable URLs+ports. - -"mindiff" : Minimum diff that vardiff will allow miners to drop to. Default 1 - -"startdiff" : Starting diff that new clients are given. 
Default 42 - -"maxdiff" : Optional maximum diff that vardiff will clamp to where zero is no -maximum. - -"logdir" : Which directory to store pool and client logs. Default "logs" - -"maxclients" : Optional upper limit on the number of clients ckpool will -accept before rejecting further clients. - -"zmqblock" : Optional interface to use for zmq blockhash notification - ckpool -only. Requires use of matched bitcoind -zmqpubhashblock option. -Default: tcp://127.0.0.1:28332 diff --git a/solo-ckpool-source/README-SOLOMINING b/solo-ckpool-source/README-SOLOMINING deleted file mode 100644 index 3022e89..0000000 --- a/solo-ckpool-source/README-SOLOMINING +++ /dev/null @@ -1,168 +0,0 @@ -Local solo mining. - ---- - -QUICK START INSTRUCTIONS (build instructions not included.) - -Get a password from bitcoin core for the RPC (remote procedure calls) to allow -the pool to talk to it. You will need bitcoin core source code. - -Within the bitcoin source code directory type the following command: -share/rpcauth/rpcauth.py ckpool - -This will give you a message such as: - -String to be appended to bitcoin.conf: -rpcauth=ckpool:c6f55b4a74b8fcbca4e8b2be22d7d53b$e2ca5e642d7ef4f43ab2524964dc6b3625ccfde09a97866c5b97c40622192149 -Your password: -sI7jIjC61U9ZYTT29GnBpm0Rg1qQV9w_TXOfBF1vOM8 - - -Edit bitcoin.conf, enabling RPC (remote procedure calls) and add the rpcauth -line above. The following would allow ckpool to talk to a bitcoin daemon running -on the same hardware: - -server=1 -rpcauth=ckpool:c6f55b4a74b8fcbca4e8b2be22d7d53b$e2ca5e642d7ef4f43ab2524964dc6b3625ccfde09a97866c5b97c40622192149 -rpcallowip=127.0.0.1 -rpcbind=127.0.0.1 - -Make sure to use the line you got when running the rpcauth command. 
- - -Restart the bitcoin daemon with zmq messaging enabled by adding the following -to the startup command: - --zmqpubhashblock=tcp://127.0.0.1:28332 - - -If your bitcoin daemon was built without zmq support, you can use the ckpool -notifier included by adding the following command - --blocknotify=$CKPOOLSOURCE/src/notifier - -(Replace $CKPOOLSOURCE with the path to where you have the ckpool source code) - - -Create or modify a ckpool configuration file (such as ckpool.conf), including -the minimum necessary entries. Make sure to use the password you got in step 1. - -{ -"btcd" : [ - { - "url" : "127.0.0.1:8332", - "auth" : "ckpool", - "pass" : "sI7jIjC61U9ZYTT29GnBpm0Rg1qQV9w_TXOfBF1vOM8", - "notify" : true - } -] -} - - -Start ckpool from the source code directory (pointing to the configuration file -only if it has a different name to ckpool.conf or is placed elsewhere) in solo -mode: - -src/ckpool -B - - -Point the pools entry on your mining hardware to the local IP address where -ckpool is running on port 3333, setting a username to the bitcoin address you -wish to mine to, and put anything in the password field (such as "x") -.e.g if ckpool has a local IP address of 192.168.1.100 - -url: 192.168.1.100:3333 -username: 1PKN98VN2z5gwSGZvGKS2bj8aADZBkyhkZ -password: x - -Any valid bitcoin address will work - -(Hope for) profit. - ---- - -OPTIONAL CHANGES. - - -Most of the ckpool configuration options would not need to be modified for a -local solo mining operation, and some of the config options are not used in -solo mode. The ckpool.conf included with the source has all the available -configuration options and is not recommended to be used as is. The following -options may be useful for a local solo mining operation. - - -Mining to one fixed address. 
If you only plan to mine to one fixed address and -not have to worry about setting the username in every piece of mining hardware, -you can set a bitcoin address to mine to as follows: - -"btcaddress" : "14BMjogz69qe8hk9thyzbmR5pg34mVKB1e", - -You must then start ckpool withOUT the -B option. This would mine to the -address 14BMjogz69qe8hk9thyzbmR5pg34mVKB1e, so modify it to the bitcoin address -you wish to mine to. - - -You can set the starting diff (instead of the default 42) on the pool as -follows: - -"startdiff" : 10000, - - -You can define a signature to be mined into any blocks you solved as follows: - -"btcsig" : "/mined by ck/", - - -You may wish to enable a donation to the author of ckpool with any blocks found -as a percentage (such as 0.5%) as follows: - -"donation" : 0.5, - -Donation is completely optional and disabled by default, but most appreciated. -0.5% would be a reasonable value. - - -By default ckpool binds to every local IP address on the hardware it's run on, -but you can restrict it to certain addresses or change the port it runs on as -follows: - -"serverurl" : [ - "127.0.0.1:3333", - "192.168.1.100:3334" -], - -In addition, if you specify a port above 4000, it will become a "high diff" -port that sets the minimum difficulty to 1 million. - -You can specify a different configuration file as follows: - -src/ckpool -B -c myconfig.conf - -or you can start ckpool with a different name and it will look for the -associated configuration - -src/ckpool -B -n local - -this will look for a configuration file called local.conf - ---- - -NOTES. - -Json is very strict with its field processing although spacing is flexible. The -most common error to watch out for is to NOT put a comma after the last field. - -You can mine with a pruned blockchain if you are short on space, though it is -not recommended as it can add more latency. 
- -Bitcoin core is NOT optimised for mining by default without modification, and -mining solo locally should be reserved as a backup operation only unless you -have the skills, hardware, and data centre quality connectivity to minimise -latency. - -Mining on testnet may create a cascade of solved competing blocks when the diff -is 1. This is normal as the default behaviour is optimised around mainnet -mining where block solving is rare. - - -Good luck. diff --git a/solo-ckpool-source/autogen.sh b/solo-ckpool-source/autogen.sh deleted file mode 100755 index b483139..0000000 --- a/solo-ckpool-source/autogen.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -autoreconf --force --install -I m4 diff --git a/solo-ckpool-source/cknode.conf b/solo-ckpool-source/cknode.conf deleted file mode 100644 index 3a02557..0000000 --- a/solo-ckpool-source/cknode.conf +++ /dev/null @@ -1,34 +0,0 @@ -{ -"btcd" : [ - { - "url" : "localhost:8332", - "auth" : "user", - "pass" : "pass", - "notify" : true - }, - { - "url" : "backup:8332", - "auth" : "user", - "pass" : "pass", - "notify" : false - } -], -"proxy" : [ - { - "url" : "ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - }, - { - "url" : "backup.ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - } -], -"serverurl" : [ - "192.168.1.100:3334", - "127.0.0.1:3334" - ], -"logdir" : "logs" -} -Comments from here on are ignored. diff --git a/solo-ckpool-source/ckpassthrough.conf b/solo-ckpool-source/ckpassthrough.conf deleted file mode 100644 index c5b85bc..0000000 --- a/solo-ckpool-source/ckpassthrough.conf +++ /dev/null @@ -1,15 +0,0 @@ -{ -"proxy" : [ - { - "url" : "ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - } -], -"serverurl" : [ - "192.168.1.100:3334", - "127.0.0.1:3334" - ], -"logdir" : "logs" -} -Comments from here on are ignored. 
diff --git a/solo-ckpool-source/ckpool.conf b/solo-ckpool-source/ckpool.conf deleted file mode 100644 index d6c9cd8..0000000 --- a/solo-ckpool-source/ckpool.conf +++ /dev/null @@ -1,42 +0,0 @@ -{ -"btcd" : [ - { - "url" : "localhost:8332", - "auth" : "user", - "pass" : "pass", - "notify" : true - }, - { - "url" : "backup:8332", - "auth" : "user", - "pass" : "pass", - "notify" : false - } -], -"upstream" : "main.ckpool.org:3336", -"btcaddress" : "14BMjogz69qe8hk9thyzbmR5pg34mVKB1e", -"btcsig" : "/mined by ck/", -"blockpoll" : 100, -"donation" : 2.0, -"nonce1length" : 4, -"nonce2length" : 8, -"update_interval" : 30, -"version_mask" : "1fffe000", -"serverurl" : [ - "ckpool.org:3333", - "node.ckpool.org:3333", - "node.ckpool.org:80" -], -"nodeserver" : [ - "ckpool.org:3335" -], -"trusted" : [ - "ckpool.org:3336" -], -"mindiff" : 1, -"startdiff" : 42, -"maxdiff" : 0, -"zmqblock" : "tcp://127.0.0.1:28332", -"logdir" : "logs" -} -Comments from here on are ignored. diff --git a/solo-ckpool-source/ckproxy.conf b/solo-ckpool-source/ckproxy.conf deleted file mode 100644 index fec5783..0000000 --- a/solo-ckpool-source/ckproxy.conf +++ /dev/null @@ -1,24 +0,0 @@ -{ -"proxy" : [ - { - "url" : "ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - }, - { - "url" : "backup.ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - } -], -"update_interval" : 30, -"serverurl" : [ - "192.168.1.100:3334", - "127.0.0.1:3334" - ], -"mindiff" : 1, -"startdiff" : 42, -"maxdiff" : 0, -"logdir" : "logs" -} -Comments from here on are ignored. 
diff --git a/solo-ckpool-source/ckredirector.conf b/solo-ckpool-source/ckredirector.conf deleted file mode 100644 index 2c499a5..0000000 --- a/solo-ckpool-source/ckredirector.conf +++ /dev/null @@ -1,23 +0,0 @@ -{ -"proxy" : [ - { - "url" : "ckpool.org:3333", - "auth" : "user", - "pass" : "pass" - } -], -"update_interval" : 30, -"serverurl" : [ - "192.168.1.100:3334", - "127.0.0.1:3334" - ], -"redirecturl" : [ - "node1.ckpool.org:3333", - "node2.ckpool.org:3333" - ], -"mindiff" : 1, -"startdiff" : 42, -"maxdiff" : 0, -"logdir" : "logs" -} -Comments from here on are ignored. diff --git a/solo-ckpool-source/configure.ac b/solo-ckpool-source/configure.ac deleted file mode 100644 index 5c3bfe5..0000000 --- a/solo-ckpool-source/configure.ac +++ /dev/null @@ -1,98 +0,0 @@ -AC_INIT([ckpool],[0.9.9],[kernel@kolivas.org]) - -AC_CANONICAL_TARGET -AC_CONFIG_MACRO_DIR([m4]) -AC_CONFIG_SRCDIR([src/ckpool.c]) -AC_CONFIG_HEADERS([config.h]) - -AM_INIT_AUTOMAKE([foreign subdir-objects]) -m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) -AC_USE_SYSTEM_EXTENSIONS - -AC_CANONICAL_BUILD -AC_CANONICAL_HOST - -dnl Make sure anyone changing configure.ac/Makefile.am has a clue -AM_MAINTAINER_MODE - -dnl Checks for programs -AC_PROG_CC -# gl_EARLY - maybe later -AC_PROG_GCC_TRADITIONAL -AM_PROG_CC_C_O -LT_INIT([disable-shared]) - -# gl_INIT - maybe later - -dnl Checks for header files. 
- -AC_FUNC_ALLOCA - -PKG_PROG_PKG_CONFIG() - -AC_CHECK_HEADERS(stdio.h stdlib.h fcntl.h sys/time.h unistd.h dirent.h) -AC_CHECK_HEADERS(ctype.h errno.h byteswap.h string.h time.h fenv.h) -AC_CHECK_HEADERS(endian.h sys/endian.h arpa/inet.h sys/poll.h syslog.h) -AC_CHECK_HEADERS(alloca.h pthread.h stdio.h math.h signal.h sys/prctl.h) -AC_CHECK_HEADERS(sys/types.h sys/socket.h sys/stat.h linux/un.h netdb.h) -AC_CHECK_HEADERS(stdint.h netinet/in.h netinet/tcp.h sys/ioctl.h getopt.h) -AC_CHECK_HEADERS(sys/epoll.h libpq-fe.h postgresql/libpq-fe.h grp.h) -AC_CHECK_HEADERS(gsl/gsl_math.h gsl/gsl_cdf.h) -AC_CHECK_HEADERS(openssl/x509.h openssl/hmac.h) -AC_CHECK_HEADERS(zmq.h) - -AC_CHECK_PROG(YASM, yasm, yes) -AM_CONDITIONAL([HAVE_YASM], [test x$YASM = xyes]) - -rorx= -avx1= -sse4= -if test x$YASM = xyes; then - rorx=`cat /proc/cpuinfo | grep -o -m 1 avx2` - if [test x$rorx != xavx2]; then - avx1=`cat /proc/cpuinfo | grep -o -m 1 avx` - if [test x$avx1 != xavx]; then - sse4=`cat /proc/cpuinfo | grep -o -m 1 sse4_1` - fi - fi -fi -AM_CONDITIONAL([HAVE_AVX2], [test x$rorx = xavx2]) -AM_CONDITIONAL([HAVE_AVX1], [test x$avx1 = xavx]) -AM_CONDITIONAL([HAVE_SSE4], [test x$sse4 = xsse4_1]) -if test x$rorx = xavx2; then - AC_DEFINE([USE_AVX2], [1], [Use avx2 assembly instructions for sha256]) -fi -if test x$avx1 = xavx; then - AC_DEFINE([USE_AVX1], [1], [Use avx1 assembly instructions for sha256]) -fi -if test x$sse4 = xsse4_1; then - AC_DEFINE([USE_SSE4], [1], [Use sse4 assembly instructions for sha256]) -fi - -AC_CONFIG_SUBDIRS([src/jansson-2.14]) -JANSSON_LIBS="jansson-2.14/src/.libs/libjansson.a" - -AC_SUBST(JANSSON_LIBS) - -AC_SEARCH_LIBS(clock_nanosleep, rt, , echo "Error: Required library rt not found." && exit 1) -AC_SEARCH_LIBS(exp, m, , echo "Error: Required library math not found." && exit 1) -AC_SEARCH_LIBS(pthread_mutex_trylock, pthread, , echo "Error: Required library pthreads not found." 
&& exit 1) -AC_SEARCH_LIBS(zmq_socket, zmq, ZMQ=yes, ZMQ=no) - -AC_CONFIG_FILES([Makefile src/Makefile]) -AC_OUTPUT - -LDFLAGS="${LDFLAGS} -Wl,--as-needed" - -echo -echo "Compilation............: make (or gmake)" -echo " YASM (Intel ASM).....: $YASM" -echo " ZMQ..................: $ZMQ" -echo " CPPFLAGS.............: $CPPFLAGS" -echo " CFLAGS...............: $CFLAGS" -echo " LDFLAGS..............: $LDFLAGS" -echo " LDADD................: $LIBS $JANSSON_LIBS" -echo -echo "Installation...........: make install (as root if needed, with 'su' or 'sudo')" -echo " prefix...............: $prefix" -echo diff --git a/solo-ckpool-source/m4/.gitignore b/solo-ckpool-source/m4/.gitignore deleted file mode 100644 index 9d5dc7f..0000000 --- a/solo-ckpool-source/m4/.gitignore +++ /dev/null @@ -1,30 +0,0 @@ -*.o -*.bin - -autom4te.cache -.deps - -Makefile -Makefile.in -INSTALL -aclocal.m4 -configure -depcomp -missing -install-sh -stamp-h1 -compile -config.log -config.status -config.guess -config.sub - -*~ - -ext_deps -config.h.in -config.h - -mkinstalldirs - -*.swp diff --git a/solo-ckpool-source/src/Makefile.am b/solo-ckpool-source/src/Makefile.am deleted file mode 100644 index ba0af01..0000000 --- a/solo-ckpool-source/src/Makefile.am +++ /dev/null @@ -1,42 +0,0 @@ -SUBDIRS = jansson-2.14 - -ACLOCAL_AMFLAGS = -I m4 -AM_CPPFLAGS = -I$(top_srcdir)/src/jansson-2.14/src - -native_objs := - -if HAVE_AVX2 -native_objs += sha256_code_release/sha256_avx2_rorx2.A -endif -if HAVE_AVX1 -native_objs += sha256_code_release/sha256_avx1.A -endif -if HAVE_SSE4 -native_objs += sha256_code_release/sha256_sse4.A -endif - -%.A: %.asm - yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o $@ $< - -noinst_LIBRARIES = libckpool.a -libckpool_a_SOURCES = libckpool.c libckpool.h sha2.c sha2.h sha256_code_release -libckpool_a_LIBADD = $(native_objs) - -bin_PROGRAMS = ckpool ckpmsg notifier -ckpool_SOURCES = ckpool.c ckpool.h generator.c generator.h bitcoin.c bitcoin.h \ - stratifier.c stratifier.h connector.c 
connector.h uthash.h \ - utlist.h -ckpool_LDADD = libckpool.a @JANSSON_LIBS@ @LIBS@ - -ckpmsg_SOURCES = ckpmsg.c -ckpmsg_LDADD = libckpool.a @JANSSON_LIBS@ - -notifier_SOURCES = notifier.c -notifier_LDADD = libckpool.a @JANSSON_LIBS@ - -install-exec-hook: - setcap CAP_NET_BIND_SERVICE=+eip $(bindir)/ckpool - $(LN_S) -f ckpool $(DESTDIR)$(bindir)/ckproxy - -uninstall-local: - rm -f $(bindir)/ckproxy diff --git a/solo-ckpool-source/src/bitcoin.c b/solo-ckpool-source/src/bitcoin.c deleted file mode 100644 index fa2132e..0000000 --- a/solo-ckpool-source/src/bitcoin.c +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include - -#include "ckpool.h" -#include "libckpool.h" -#include "bitcoin.h" -#include "stratifier.h" - -static char* understood_rules[] = {"segwit"}; - -static bool check_required_rule(const char* rule) -{ - unsigned int i; - - for (i = 0; i < sizeof(understood_rules) / sizeof(understood_rules[0]); i++) { - if (safecmp(understood_rules[i], rule) == 0) - return true; - } - return false; -} - -/* Take a bitcoin address and do some sanity checks on it, then send it to - * bitcoind to see if it's a valid address */ -bool validate_address(connsock_t *cs, const char *address, bool *script, bool *segwit) -{ - json_t *val, *res_val, *valid_val, *tmp_val; - char rpc_req[128]; - bool ret = false; - - if (unlikely(!address)) { - LOGWARNING("Null address passed to validate_address"); - return ret; - } - - snprintf(rpc_req, 128, "{\"method\": \"validateaddress\", \"params\": [\"%s\"]}\n", address); - val = json_rpc_response(cs, rpc_req); - if (!val) { - /* May get a parse error with an invalid address */ - LOGNOTICE("%s:%s 
Failed to get valid json response to validate_address %s", - cs->url, cs->port, address); - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGERR("Failed to get result json response to validate_address"); - goto out; - } - valid_val = json_object_get(res_val, "isvalid"); - if (!valid_val) { - LOGERR("Failed to get isvalid json response to validate_address"); - goto out; - } - if (!json_is_true(valid_val)) { - LOGDEBUG("Bitcoin address %s is NOT valid", address); - goto out; - } - ret = true; - tmp_val = json_object_get(res_val, "isscript"); - if (unlikely(!tmp_val)) { - /* All recent bitcoinds with wallet support built in should - * support this, if not, look for addresses the braindead way - * to tell if it's a script address. */ - LOGDEBUG("No isscript support from bitcoind"); - if (address[0] == '3' || address[0] == '2') - *script = true; - /* Now look to see this isn't a bech32: We can't support - * bech32 without knowing if it's a pubkey or a script */ - else if (address[0] != '1' && address[0] != 'm') - ret = false; - goto out; - } - *script = json_is_true(tmp_val); - tmp_val = json_object_get(res_val, "iswitness"); - if (unlikely(!tmp_val)) - goto out; - *segwit = json_is_true(tmp_val); - LOGDEBUG("Bitcoin address %s IS valid%s%s", address, *script ? " script" : "", - *segwit ? 
" segwit" : ""); -out: - if (val) - json_decref(val); - return ret; -} - -json_t *validate_txn(connsock_t *cs, const char *txn) -{ - json_t *val = NULL; - char *rpc_req; - int len; - - if (unlikely(!txn || !strlen(txn))) { - LOGWARNING("Null transaction passed to validate_txn"); - goto out; - } - len = strlen(txn) + 64; - rpc_req = ckalloc(len); - sprintf(rpc_req, "{\"method\": \"decoderawtransaction\", \"params\": [\"%s\"]}", txn); - val = json_rpc_call(cs, rpc_req); - dealloc(rpc_req); - if (!val) - LOGDEBUG("%s:%s Failed to get valid json response to decoderawtransaction", cs->url, cs->port); -out: - return val; -} - -static const char *gbt_req = "{\"method\": \"getblocktemplate\", \"params\": [{\"capabilities\": [\"coinbasetxn\", \"workid\", \"coinbase/append\"], \"rules\" : [\"segwit\"]}]}\n"; - -/* Request getblocktemplate from bitcoind already connected with a connsock_t - * and then summarise the information to the most efficient set of data - * required to assemble a mining template, storing it in a gbtbase_t structure */ -bool gen_gbtbase(connsock_t *cs, gbtbase_t *gbt) -{ - json_t *rules_array, *coinbase_aux, *res_val, *val; - const char *previousblockhash; - char hash_swap[32], tmp[32]; - uint64_t coinbasevalue; - const char *target; - const char *flags; - const char *bits; - const char *rule; - int version; - int curtime; - int height; - int i; - bool ret = false; - - val = json_rpc_call(cs, gbt_req); - if (!val) { - LOGWARNING("%s:%s Failed to get valid json response to getblocktemplate", cs->url, cs->port); - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGWARNING("Failed to get result in json response to getblocktemplate"); - goto out; - } - - rules_array = json_object_get(res_val, "rules"); - if (rules_array) { - int rule_count = json_array_size(rules_array); - - for (i = 0; i < rule_count; i++) { - rule = json_string_value(json_array_get(rules_array, i)); - if (rule && *rule++ == '!' 
&& !check_required_rule(rule)) { - LOGERR("Required rule not understood: %s", rule); - goto out; - } - } - } - - previousblockhash = json_string_value(json_object_get(res_val, "previousblockhash")); - target = json_string_value(json_object_get(res_val, "target")); - version = json_integer_value(json_object_get(res_val, "version")); - curtime = json_integer_value(json_object_get(res_val, "curtime")); - bits = json_string_value(json_object_get(res_val, "bits")); - height = json_integer_value(json_object_get(res_val, "height")); - coinbasevalue = json_integer_value(json_object_get(res_val, "coinbasevalue")); - coinbase_aux = json_object_get(res_val, "coinbaseaux"); - flags = json_string_value(json_object_get(coinbase_aux, "flags")); - if (!flags) - flags = ""; - - if (unlikely(!previousblockhash || !target || !version || !curtime || !bits || !coinbase_aux)) { - LOGERR("JSON failed to decode GBT %s %s %d %d %s %s", previousblockhash, target, version, curtime, bits, flags); - goto out; - } - - /* Store getblocktemplate for remainder of json components as is */ - json_incref(res_val); - json_object_del(val, "result"); - gbt->json = res_val; - - hex2bin(hash_swap, previousblockhash, 32); - swap_256(tmp, hash_swap); - __bin2hex(gbt->prevhash, tmp, 32); - - strncpy(gbt->target, target, 65); - - hex2bin(hash_swap, target, 32); - bswap_256(tmp, hash_swap); - gbt->diff = diff_from_target((uchar *)tmp); - json_object_set_new_nocheck(gbt->json, "diff", json_real(gbt->diff)); - - gbt->version = version; - - gbt->curtime = curtime; - - snprintf(gbt->ntime, 9, "%08x", curtime); - json_object_set_new_nocheck(gbt->json, "ntime", json_string_nocheck(gbt->ntime)); - sscanf(gbt->ntime, "%x", &gbt->ntime32); - - snprintf(gbt->bbversion, 9, "%08x", version); - json_object_set_new_nocheck(gbt->json, "bbversion", json_string_nocheck(gbt->bbversion)); - - snprintf(gbt->nbit, 9, "%s", bits); - json_object_set_new_nocheck(gbt->json, "nbit", json_string_nocheck(gbt->nbit)); - - 
gbt->coinbasevalue = coinbasevalue; - - gbt->height = height; - - gbt->flags = strdup(flags); - - ret = true; -out: - json_decref(val); - return ret; -} - -void clear_gbtbase(gbtbase_t *gbt) -{ - free(gbt->flags); - if (gbt->json) - json_decref(gbt->json); - memset(gbt, 0, sizeof(gbtbase_t)); -} - -static const char *blockcount_req = "{\"method\": \"getblockcount\"}\n"; - -/* Request getblockcount from bitcoind, returning the count or -1 if the call - * fails. */ -int get_blockcount(connsock_t *cs) -{ - json_t *val, *res_val; - int ret = -1; - - val = json_rpc_call(cs, blockcount_req); - if (!val) { - LOGWARNING("%s:%s Failed to get valid json response to getblockcount", cs->url, cs->port); - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGWARNING("Failed to get result in json response to getblockcount"); - goto out; - } - ret = json_integer_value(res_val); -out: - json_decref(val); - return ret; -} - -/* Request getblockhash from bitcoind for height, writing the value into *hash - * which should be at least 65 bytes long since the hash is 64 chars. 
*/ -bool get_blockhash(connsock_t *cs, int height, char *hash) -{ - json_t *val, *res_val; - const char *res_ret; - char rpc_req[128]; - bool ret = false; - - sprintf(rpc_req, "{\"method\": \"getblockhash\", \"params\": [%d]}\n", height); - val = json_rpc_call(cs, rpc_req); - if (!val) { - LOGWARNING("%s:%s Failed to get valid json response to getblockhash", cs->url, cs->port); - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGWARNING("Failed to get result in json response to getblockhash"); - goto out; - } - res_ret = json_string_value(res_val); - if (!res_ret || !strlen(res_ret)) { - LOGWARNING("Got null string in result to getblockhash"); - goto out; - } - strncpy(hash, res_ret, 65); - ret = true; -out: - json_decref(val); - return ret; -} - -static const char *bestblockhash_req = "{\"method\": \"getbestblockhash\"}\n"; - -/* Request getbestblockhash from bitcoind. bitcoind 0.9+ only */ -bool get_bestblockhash(connsock_t *cs, char *hash) -{ - json_t *val, *res_val; - const char *res_ret; - bool ret = false; - - val = json_rpc_call(cs, bestblockhash_req); - if (!val) { - LOGWARNING("%s:%s Failed to get valid json response to getbestblockhash", cs->url, cs->port); - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGWARNING("Failed to get result in json response to getbestblockhash"); - goto out; - } - res_ret = json_string_value(res_val); - if (!res_ret || !strlen(res_ret)) { - LOGWARNING("Got null string in result to getbestblockhash"); - goto out; - } - strncpy(hash, res_ret, 65); - ret = true; -out: - json_decref(val); - return ret; -} - -bool submit_block(connsock_t *cs, const char *params) -{ - json_t *val, *res_val; - int len, retries = 0; - const char *res_ret; - bool ret = false; - char *rpc_req; - - len = strlen(params) + 64; -retry: - rpc_req = ckalloc(len); - sprintf(rpc_req, "{\"method\": \"submitblock\", \"params\": [\"%s\"]}\n", params); - val = json_rpc_call(cs, rpc_req); - 
dealloc(rpc_req); - if (!val) { - LOGWARNING("%s:%s Failed to get valid json response to submitblock", cs->url, cs->port); - if (++retries < 5) - goto retry; - return ret; - } - res_val = json_object_get(val, "result"); - if (!res_val) { - LOGWARNING("Failed to get result in json response to submitblock"); - if (++retries < 5) { - json_decref(val); - goto retry; - } - goto out; - } - if (!json_is_null(res_val)) { - res_ret = json_string_value(res_val); - if (res_ret && strlen(res_ret)) { - LOGWARNING("SUBMIT BLOCK RETURNED: %s", res_ret); - /* Consider duplicate response as an accepted block */ - if (safecmp(res_ret, "duplicate")) - goto out; - } else { - LOGWARNING("SUBMIT BLOCK GOT NO RESPONSE!"); - goto out; - } - } - LOGWARNING("BLOCK ACCEPTED!"); - ret = true; -out: - json_decref(val); - return ret; -} - -void precious_block(connsock_t *cs, const char *params) -{ - char *rpc_req; - int len; - - if (unlikely(!cs->alive)) { - LOGDEBUG("Failed to submit_txn due to connsock dead"); - return; - } - - len = strlen(params) + 64; - rpc_req = ckalloc(len); - sprintf(rpc_req, "{\"method\": \"preciousblock\", \"params\": [\"%s\"]}\n", params); - json_rpc_msg(cs, rpc_req); - dealloc(rpc_req); -} - -void submit_txn(connsock_t *cs, const char *params) -{ - char *rpc_req; - int len; - - if (unlikely(!cs->alive)) { - LOGDEBUG("Failed to submit_txn due to connsock dead"); - return; - } - - len = strlen(params) + 64; - rpc_req = ckalloc(len); - sprintf(rpc_req, "{\"method\": \"sendrawtransaction\", \"params\": [\"%s\"]}\n", params); - json_rpc_msg(cs, rpc_req); - dealloc(rpc_req); -} - -char *get_txn(connsock_t *cs, const char *hash) -{ - char *rpc_req, *ret = NULL; - json_t *val, *res_val; - - if (unlikely(!cs->alive)) { - LOGDEBUG("Failed to get_txn due to connsock dead"); - goto out; - } - - ASPRINTF(&rpc_req, "{\"method\": \"getrawtransaction\", \"params\": [\"%s\"]}\n", hash); - val = json_rpc_response(cs, rpc_req); - dealloc(rpc_req); - if (!val) { - LOGDEBUG("%s:%s 
Failed to get valid json response to get_txn", cs->url, cs->port); - goto out; - } - res_val = json_object_get(val, "result"); - if (res_val && !json_is_null(res_val) && json_is_string(res_val)) { - ret = strdup(json_string_value(res_val)); - LOGDEBUG("get_txn for hash %s got data %s", hash, ret); - } else - LOGDEBUG("get_txn did not retrieve data for hash %s", hash); - json_decref(val); -out: - return ret; -} diff --git a/solo-ckpool-source/src/bitcoin.h b/solo-ckpool-source/src/bitcoin.h deleted file mode 100644 index 73013c7..0000000 --- a/solo-ckpool-source/src/bitcoin.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#ifndef BITCOIN_H -#define BITCOIN_H - -typedef struct genwork gbtbase_t; - -bool validate_address(connsock_t *cs, const char *address, bool *script, bool *segwit); -json_t *validate_txn(connsock_t *cs, const char *txn); -bool gen_gbtbase(connsock_t *cs, gbtbase_t *gbt); -void clear_gbtbase(gbtbase_t *gbt); -int get_blockcount(connsock_t *cs); -bool get_blockhash(connsock_t *cs, int height, char *hash); -bool get_bestblockhash(connsock_t *cs, char *hash); -bool submit_block(connsock_t *cs, const char *params); -void precious_block(connsock_t *cs, const char *params); -void submit_txn(connsock_t *cs, const char *params); -char *get_txn(connsock_t *cs, const char *hash); - -#endif /* BITCOIN_H */ diff --git a/solo-ckpool-source/src/ckpmsg.c b/solo-ckpool-source/src/ckpmsg.c deleted file mode 100644 index c9763cb..0000000 --- a/solo-ckpool-source/src/ckpmsg.c +++ /dev/null @@ -1,330 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * Copyright 2014-2016 Andrew Smith - * - * This program is free software; you can redistribute 
it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include - -#include "libckpool.h" -#include "utlist.h" - -struct input_log { - struct input_log *next; - struct input_log *prev; - char *buf; -}; - -struct input_log *input_log; - -static int msg_loglevel = LOG_DEBUG; - -void logmsg(int loglevel, const char *fmt, ...) -{ - va_list ap; - char *buf; - - if (loglevel <= msg_loglevel) { - va_start(ap, fmt); - VASPRINTF(&buf, fmt, ap); - va_end(ap); - - printf("%s\n", buf); - free(buf); - } -} - -void mkstamp(char *stamp, size_t siz) -{ - long minoff, hroff; - char tzinfo[24]; - time_t now_t; - struct tm tm; - char tzch; - - now_t = time(NULL); - localtime_r(&now_t, &tm); - minoff = tm.tm_gmtoff / 60; - if (minoff < 0) { - tzch = '-'; - minoff *= -1; - } else - tzch = '+'; - hroff = minoff / 60; - if (minoff % 60) { - snprintf(tzinfo, sizeof(tzinfo), - "%c%02ld:%02ld", - tzch, hroff, minoff % 60); - } else { - snprintf(tzinfo, sizeof(tzinfo), - "%c%02ld", - tzch, hroff); - } - snprintf(stamp, siz, - "[%d-%02d-%02d %02d:%02d:%02d%s]", - tm.tm_year + 1900, - tm.tm_mon + 1, - tm.tm_mday, - tm.tm_hour, - tm.tm_min, - tm.tm_sec, - tzinfo); -} - -static struct option long_options[] = { - {"counter", no_argument, 0, 'c'}, - {"help", no_argument, 0, 'h'}, - {"loglevel", required_argument, 0, 'l'}, - {"name", required_argument, 0, 'n'}, - {"sockname", required_argument, 0, 'N'}, - {"proxy", no_argument, 0, 'p'}, - {"sockdir", required_argument, 0, 's'}, - {"timeout1", required_argument, 0, 't'}, - {"timeout2", required_argument, 0, 'T'}, - {0, 0, 0, 0} -}; - -struct termios oldctrl; - -static void sighandler(const int sig) -{ - /* Return console to its previous state */ - tcsetattr(STDIN_FILENO, TCSANOW, 
&oldctrl); - - if (sig) { - signal (sig, SIG_DFL); - raise (sig); - } -} - -int get_line(char **buf) -{ - struct input_log *entry = NULL; - int c, len = 0, ctl1, ctl2; - struct termios ctrl; - *buf = NULL; - - /* If we're not reading from a terminal, parse lines at a time allowing - * us to script usage of ckpmsg */ - if (!isatty(fileno((FILE *)stdin))) do { - size_t n; - - dealloc(*buf); - len = getline(buf, &n, stdin); - if (len == -1) { - dealloc(*buf); - goto out; - } - len = strlen(*buf); - (*buf)[--len] = '\0'; // Strip \n - goto out; - } while (42); - - tcgetattr(STDIN_FILENO, &ctrl); - ctrl.c_lflag &= ~(ICANON | ECHO); // turn off canonical mode and echo - tcsetattr(STDIN_FILENO, TCSANOW, &ctrl); - - do { - c = getchar(); - if (c == EOF || c == '\n') - break; - if (c == 27) { - ctl1 = getchar(); - ctl2 = getchar(); - if (ctl1 != '[') - continue; - if (ctl2 < 'A' || ctl2 > 'B') - continue; - if (!input_log) - continue; - printf("\33[2K\r"); - free(*buf); - if (ctl2 == 'B') - entry = entry ? entry->prev : input_log->prev; - else - entry = entry ? 
entry->next : input_log; - *buf = strdup(entry->buf); - len = strlen(*buf); - printf("%s", *buf); - } - if (c == 127) { - if (!len) - continue; - printf("\b \b"); - (*buf)[--len] = '\0'; - continue; - } - if (c < 32 || c > 126) - continue; - len++; - realloc_strcat(buf, (char *)&c); - putchar(c); - } while (42); - - if (*buf) - len = strlen(*buf); - printf("\n"); -out: - return len; -} - -int main(int argc, char **argv) -{ - char *name = NULL, *socket_dir = NULL, *buf = NULL, *sockname = "listener"; - bool proxy = false, counter = false; - int tmo1 = RECV_UNIX_TIMEOUT1; - int tmo2 = RECV_UNIX_TIMEOUT2; - struct sigaction handler; - int c, count, i = 0, j; - char stamp[128]; - - tcgetattr(STDIN_FILENO, &oldctrl); - - while ((c = getopt_long(argc, argv, "chl:N:n:ps:t:T:", long_options, &i)) != -1) { - switch(c) { - /* You'd normally disable most logmsg with -l 3 to - * only see the counter */ - case 'c': - counter = true; - break; - case 'h': - for (j = 0; long_options[j].val; j++) { - struct option *jopt = &long_options[j]; - - if (jopt->has_arg) { - char *upper = alloca(strlen(jopt->name) + 1); - int offset = 0; - - do { - upper[offset] = toupper(jopt->name[offset]); - } while (upper[offset++] != '\0'); - printf("-%c %s | --%s %s\n", jopt->val, - upper, jopt->name, upper); - } else - printf("-%c | --%s\n", jopt->val, jopt->name); - } - exit(0); - case 'l': - msg_loglevel = atoi(optarg); - if (msg_loglevel < LOG_EMERG || - msg_loglevel > LOG_DEBUG) { - quit(1, "Invalid loglevel: %d (range %d" - " - %d)", - msg_loglevel, - LOG_EMERG, - LOG_DEBUG); - } - break; - /* Allows us to specify which process or socket to - * talk to. 
*/ - case 'N': - sockname = strdup(optarg); - break; - case 'n': - name = strdup(optarg); - break; - case 'p': - proxy = true; - break; - case 's': - socket_dir = strdup(optarg); - break; - case 't': - tmo1 = atoi(optarg); - break; - case 'T': - tmo2 = atoi(optarg); - break; - } - } - if (!socket_dir) - socket_dir = strdup("/tmp"); - trail_slash(&socket_dir); - if (!name) { - if (proxy) - name = strdup("ckproxy"); - else - name = strdup("ckpool"); - } - realloc_strcat(&socket_dir, name); - dealloc(name); - trail_slash(&socket_dir); - realloc_strcat(&socket_dir, sockname); - - signal(SIGPIPE, SIG_IGN); - handler.sa_handler = &sighandler; - handler.sa_flags = 0; - sigemptyset(&handler.sa_mask); - sigaction(SIGTERM, &handler, NULL); - sigaction(SIGINT, &handler, NULL); - sigaction(SIGQUIT, &handler, NULL); - sigaction(SIGKILL, &handler, NULL); - sigaction(SIGHUP, &handler, NULL); - - count = 0; - while (42) { - struct input_log *log_entry; - int sockd, len; - char *buf2; - - len = get_line(&buf); - if (len == -1) - break; - mkstamp(stamp, sizeof(stamp)); - if (len < 1) { - LOGERR("%s No message", stamp); - continue; - } - if (buf[0] == '#') { - LOGDEBUG("%s Got comment: %s", stamp, buf); - continue; - } - LOGDEBUG("%s Got message: %s", stamp, buf); - log_entry = ckalloc(sizeof(struct input_log)); - log_entry->buf = buf; - CDL_PREPEND(input_log, log_entry); - - sockd = open_unix_client(socket_dir); - if (sockd < 0) { - LOGERR("Failed to open socket: %s", socket_dir); - break; - } - if (!send_unix_msg(sockd, buf)) { - LOGERR("Failed to send unix msg: %s", buf); - break; - } - buf2 = recv_unix_msg_tmo2(sockd, tmo1, tmo2); - close(sockd); - if (!buf2) { - LOGERR("Received empty reply"); - continue; - } - mkstamp(stamp, sizeof(stamp)); - LOGMSGSIZ(65536, LOG_NOTICE, "%s Received response: %s", stamp, buf2); - dealloc(buf2); - - if (counter) { - if ((++count % 100) == 0) { - printf("%8d\r", count); - fflush(stdout); - } - } - } - - dealloc(socket_dir); - sighandler(0); - - 
return 0; -} diff --git a/solo-ckpool-source/src/ckpool.c b/solo-ckpool-source/src/ckpool.c deleted file mode 100644 index ea99b41..0000000 --- a/solo-ckpool-source/src/ckpool.c +++ /dev/null @@ -1,1904 +0,0 @@ -/* - * Copyright 2014-2020,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ckpool.h" -#include "libckpool.h" -#include "generator.h" -#include "stratifier.h" -#include "connector.h" - -ckpool_t *global_ckp; - -static bool open_logfile(ckpool_t *ckp) -{ - if (ckp->logfd > 0) { - flock(ckp->logfd, LOCK_EX); - fflush(ckp->logfp); - Close(ckp->logfd); - } - ckp->logfp = fopen(ckp->logfilename, "ae"); - if (unlikely(!ckp->logfp)) { - LOGEMERG("Failed to make open log file %s", ckp->logfilename); - return false; - } - /* Make logging line buffered */ - setvbuf(ckp->logfp, NULL, _IOLBF, 0); - ckp->logfd = fileno(ckp->logfp); - ckp->lastopen_t = time(NULL); - return true; -} - -/* Use ckmsgqs for logging to console and files to prevent logmsg from blocking - * on any delays. 
*/ -static void console_log(ckpool_t __maybe_unused *ckp, char *msg) -{ - /* Add clear line only if stderr is going to console */ - if (isatty(fileno(stderr))) - fprintf(stderr, "\33[2K\r"); - fprintf(stderr, "%s", msg); - fflush(stderr); - - free(msg); -} - -static void proclog(ckpool_t *ckp, char *msg) -{ - time_t log_t = time(NULL); - - /* Reopen log file every minute, allowing us to move/rename it and - * create a new logfile */ - if (log_t > ckp->lastopen_t + 60) { - LOGDEBUG("Reopening logfile"); - open_logfile(ckp); - } - - flock(ckp->logfd, LOCK_EX); - fprintf(ckp->logfp, "%s", msg); - flock(ckp->logfd, LOCK_UN); - - free(msg); -} - -void get_timestamp(char *stamp) -{ - struct tm tm; - tv_t now_tv; - int ms; - - tv_time(&now_tv); - ms = (int)(now_tv.tv_usec / 1000); - localtime_r(&(now_tv.tv_sec), &tm); - sprintf(stamp, "[%d-%02d-%02d %02d:%02d:%02d.%03d]", - tm.tm_year + 1900, - tm.tm_mon + 1, - tm.tm_mday, - tm.tm_hour, - tm.tm_min, - tm.tm_sec, ms); -} - -/* Log everything to the logfile, but display warnings on the console as well */ -void logmsg(int loglevel, const char *fmt, ...) 
-{ - int logfd = global_ckp->logfd; - char *log, *buf = NULL; - char stamp[128]; - va_list ap; - - if (global_ckp->loglevel < loglevel || !fmt) - return; - - va_start(ap, fmt); - VASPRINTF(&buf, fmt, ap); - va_end(ap); - - if (unlikely(!buf)) { - fprintf(stderr, "Null buffer sent to logmsg\n"); - return; - } - if (unlikely(!strlen(buf))) { - fprintf(stderr, "Zero length string sent to logmsg\n"); - goto out; - } - get_timestamp(stamp); - if (loglevel <= LOG_ERR && errno != 0) - ASPRINTF(&log, "%s %s with errno %d: %s\n", stamp, buf, errno, strerror(errno)); - else - ASPRINTF(&log, "%s %s\n", stamp, buf); - - if (unlikely(!global_ckp->console_logger)) { - fprintf(stderr, "%s", log); - goto out_free; - } - if (loglevel <= LOG_WARNING) - ckmsgq_add(global_ckp->console_logger, strdup(log)); - if (logfd > 0) - ckmsgq_add(global_ckp->logger, strdup(log)); -out_free: - free(log); -out: - free(buf); -} - -/* Generic function for creating a message queue receiving and parsing thread */ -static void *ckmsg_queue(void *arg) -{ - ckmsgq_t *ckmsgq = (ckmsgq_t *)arg; - ckpool_t *ckp = ckmsgq->ckp; - - pthread_detach(pthread_self()); - rename_proc(ckmsgq->name); - ckmsgq->active = true; - - while (42) { - ckmsg_t *msg; - tv_t now; - ts_t abs; - - mutex_lock(ckmsgq->lock); - tv_time(&now); - tv_to_ts(&abs, &now); - abs.tv_sec++; - if (!ckmsgq->msgs) - cond_timedwait(ckmsgq->cond, ckmsgq->lock, &abs); - msg = ckmsgq->msgs; - if (msg) - DL_DELETE(ckmsgq->msgs, msg); - mutex_unlock(ckmsgq->lock); - - if (!msg) - continue; - ckmsgq->func(ckp, msg->data); - free(msg); - } - return NULL; -} - -ckmsgq_t *create_ckmsgq(ckpool_t *ckp, const char *name, const void *func) -{ - ckmsgq_t *ckmsgq = ckzalloc(sizeof(ckmsgq_t)); - - strncpy(ckmsgq->name, name, 15); - ckmsgq->func = func; - ckmsgq->ckp = ckp; - ckmsgq->lock = ckalloc(sizeof(mutex_t)); - ckmsgq->cond = ckalloc(sizeof(pthread_cond_t)); - mutex_init(ckmsgq->lock); - cond_init(ckmsgq->cond); - create_pthread(&ckmsgq->pth, ckmsg_queue, 
ckmsgq); - - return ckmsgq; -} - -ckmsgq_t *create_ckmsgqs(ckpool_t *ckp, const char *name, const void *func, const int count) -{ - ckmsgq_t *ckmsgq = ckzalloc(sizeof(ckmsgq_t) * count); - mutex_t *lock; - pthread_cond_t *cond; - int i; - - lock = ckalloc(sizeof(mutex_t)); - cond = ckalloc(sizeof(pthread_cond_t)); - mutex_init(lock); - cond_init(cond); - - for (i = 0; i < count; i++) { - snprintf(ckmsgq[i].name, 15, "%.6s%x", name, i); - ckmsgq[i].func = func; - ckmsgq[i].ckp = ckp; - ckmsgq[i].lock = lock; - ckmsgq[i].cond = cond; - create_pthread(&ckmsgq[i].pth, ckmsg_queue, &ckmsgq[i]); - } - - return ckmsgq; -} - -/* Generic function for adding messages to a ckmsgq linked list and signal the - * ckmsgq parsing thread(s) to wake up and process it. */ -bool _ckmsgq_add(ckmsgq_t *ckmsgq, void *data, const char *file, const char *func, const int line) -{ - ckmsg_t *msg; - - if (unlikely(!ckmsgq)) { - LOGWARNING("Sending messages to no queue from %s %s:%d", file, func, line); - /* Discard data if we're unlucky enough to be sending it to - * msg queues not set up during start up */ - free(data); - return false; - } - while (unlikely(!ckmsgq->active)) - cksleep_ms(10); - - msg = ckalloc(sizeof(ckmsg_t)); - msg->data = data; - - mutex_lock(ckmsgq->lock); - ckmsgq->messages++; - DL_APPEND(ckmsgq->msgs, msg); - pthread_cond_broadcast(ckmsgq->cond); - mutex_unlock(ckmsgq->lock); - - return true; -} - -/* Return whether there are any messages queued in the ckmsgq linked list. 
*/ -bool ckmsgq_empty(ckmsgq_t *ckmsgq) -{ - bool ret = true; - - if (unlikely(!ckmsgq || !ckmsgq->active)) - goto out; - - mutex_lock(ckmsgq->lock); - if (ckmsgq->msgs) - ret = (ckmsgq->msgs->next == ckmsgq->msgs->prev); - mutex_unlock(ckmsgq->lock); -out: - return ret; -} - -/* Create a standalone thread that queues received unix messages for a proc - * instance and adds them to linked list of received messages with their - * associated receive socket, then signal the associated rmsg_cond for the - * process to know we have more queued messages. The unix_msg_t ram must be - * freed by the code that removes the entry from the list. */ -static void *unix_receiver(void *arg) -{ - proc_instance_t *pi = (proc_instance_t *)arg; - int rsockd = pi->us.sockd, sockd; - char qname[16]; - - sprintf(qname, "%cunixrq", pi->processname[0]); - rename_proc(qname); - pthread_detach(pthread_self()); - - while (42) { - unix_msg_t *umsg; - char *buf; - - sockd = accept(rsockd, NULL, NULL); - if (unlikely(sockd < 0)) { - LOGEMERG("Failed to accept on %s socket, exiting", qname); - break; - } - buf = recv_unix_msg(sockd); - if (unlikely(!buf)) { - Close(sockd); - LOGWARNING("Failed to get message on %s socket", qname); - continue; - } - umsg = ckalloc(sizeof(unix_msg_t)); - umsg->sockd = sockd; - umsg->buf = buf; - - mutex_lock(&pi->rmsg_lock); - DL_APPEND(pi->unix_msgs, umsg); - pthread_cond_signal(&pi->rmsg_cond); - mutex_unlock(&pi->rmsg_lock); - } - - return NULL; -} - -/* Get the next message in the receive queue, or wait up to 5 seconds for - * the next message, returning NULL if no message is received in that time. 
*/ -unix_msg_t *get_unix_msg(proc_instance_t *pi) -{ - unix_msg_t *umsg; - - mutex_lock(&pi->rmsg_lock); - if (!pi->unix_msgs) { - tv_t now; - ts_t abs; - - tv_time(&now); - tv_to_ts(&abs, &now); - abs.tv_sec += 5; - cond_timedwait(&pi->rmsg_cond, &pi->rmsg_lock, &abs); - } - umsg = pi->unix_msgs; - if (umsg) - DL_DELETE(pi->unix_msgs, umsg); - mutex_unlock(&pi->rmsg_lock); - - return umsg; -} - -static void create_unix_receiver(proc_instance_t *pi) -{ - pthread_t pth; - - mutex_init(&pi->rmsg_lock); - cond_init(&pi->rmsg_cond); - - create_pthread(&pth, unix_receiver, pi); -} - -/* Put a sanity check on kill calls to make sure we are not sending them to - * pid 0. */ -static int kill_pid(const int pid, const int sig) -{ - if (pid < 1) - return -1; - return kill(pid, sig); -} - -static int pid_wait(const pid_t pid, const int ms) -{ - tv_t start, now; - int ret; - - tv_time(&start); - do { - ret = kill_pid(pid, 0); - if (ret) - break; - tv_time(&now); - } while (ms_tvdiff(&now, &start) < ms); - return ret; -} - -static void api_message(ckpool_t *ckp, char **buf, int *sockd) -{ - apimsg_t *apimsg = ckalloc(sizeof(apimsg_t)); - - apimsg->buf = *buf; - *buf = NULL; - apimsg->sockd = *sockd; - *sockd = -1; - ckmsgq_add(ckp->ckpapi, apimsg); -} - -/* Listen for incoming global requests. 
Always returns a response if possible */ -static void *listener(void *arg) -{ - proc_instance_t *pi = (proc_instance_t *)arg; - unixsock_t *us = &pi->us; - ckpool_t *ckp = pi->ckp; - char *buf = NULL, *msg; - int sockd; - - rename_proc(pi->sockname); -retry: - dealloc(buf); - sockd = accept(us->sockd, NULL, NULL); - if (sockd < 0) { - LOGERR("Failed to accept on socket in listener"); - goto out; - } - - buf = recv_unix_msg(sockd); - if (!buf) { - LOGWARNING("Failed to get message in listener"); - send_unix_msg(sockd, "failed"); - } else if (buf[0] == '{') { - /* Any JSON messages received are for the RPC API to handle */ - api_message(ckp, &buf, &sockd); - } else if (cmdmatch(buf, "shutdown")) { - LOGWARNING("Listener received shutdown message, terminating ckpool"); - send_unix_msg(sockd, "exiting"); - goto out; - } else if (cmdmatch(buf, "ping")) { - LOGDEBUG("Listener received ping request"); - send_unix_msg(sockd, "pong"); - } else if (cmdmatch(buf, "loglevel")) { - int loglevel; - - if (sscanf(buf, "loglevel=%d", &loglevel) != 1) { - LOGWARNING("Failed to parse loglevel message %s", buf); - send_unix_msg(sockd, "Failed"); - } else if (loglevel < LOG_EMERG || loglevel > LOG_DEBUG) { - LOGWARNING("Invalid loglevel %d sent", loglevel); - send_unix_msg(sockd, "Invalid"); - } else { - ckp->loglevel = loglevel; - send_unix_msg(sockd, "success"); - } - } else if (cmdmatch(buf, "getxfd")) { - int fdno = -1; - - sscanf(buf, "getxfd%d", &fdno); - connector_send_fd(ckp, fdno, sockd); - } else if (cmdmatch(buf, "accept")) { - LOGWARNING("Listener received accept message, accepting clients"); - send_proc(ckp->connector, "accept"); - send_unix_msg(sockd, "accepting"); - } else if (cmdmatch(buf, "reject")) { - LOGWARNING("Listener received reject message, rejecting clients"); - send_proc(ckp->connector, "reject"); - send_unix_msg(sockd, "rejecting"); - } else if (cmdmatch(buf, "reconnect")) { - LOGWARNING("Listener received request to send reconnect to clients"); - 
send_proc(ckp->stratifier, buf); - send_unix_msg(sockd, "reconnecting"); - } else if (cmdmatch(buf, "restart")) { - LOGWARNING("Listener received restart message, attempting handover"); - send_unix_msg(sockd, "restarting"); - if (!fork()) { - if (!ckp->handover) { - ckp->initial_args[ckp->args++] = strdup("-H"); - ckp->initial_args[ckp->args] = NULL; - } - execv(ckp->initial_args[0], (char *const *)ckp->initial_args); - } - } else if (cmdmatch(buf, "stratifierstats")) { - LOGDEBUG("Listener received stratifierstats request"); - msg = stratifier_stats(ckp, ckp->sdata); - send_unix_msg(sockd, msg); - dealloc(msg); - } else if (cmdmatch(buf, "connectorstats")) { - LOGDEBUG("Listener received connectorstats request"); - msg = connector_stats(ckp->cdata, 0); - send_unix_msg(sockd, msg); - dealloc(msg); - } else if (cmdmatch(buf, "resetshares")) { - LOGWARNING("Resetting best shares"); - send_proc(ckp->stratifier, buf); - send_unix_msg(sockd, "resetting"); - } else { - LOGINFO("Listener received unhandled message: %s", buf); - send_unix_msg(sockd, "unknown"); - } - Close(sockd); - goto retry; -out: - dealloc(buf); - close_unix_socket(us->sockd, us->path); - return NULL; -} - -void empty_buffer(connsock_t *cs) -{ - if (cs->buf) - cs->buf[0] = '\0'; - cs->buflen = cs->bufofs = 0; -} - -int set_sendbufsize(ckpool_t *ckp, const int fd, const int len) -{ - socklen_t optlen; - int opt; - - optlen = sizeof(opt); - opt = len * 4 / 3; - setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, optlen); - getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, &optlen); - opt /= 2; - if (opt < len) { - LOGDEBUG("Failed to set desired sendbufsize of %d unprivileged, only got %d", - len, opt); - optlen = sizeof(opt); - opt = len * 4 / 3; - setsockopt(fd, SOL_SOCKET, SO_SNDBUFFORCE, &opt, optlen); - getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &opt, &optlen); - opt /= 2; - } - if (opt < len) { - LOGNOTICE("Failed to increase sendbufsize to %d, increase wmem_max or start %s privileged if using a remote btcd", - len, 
ckp->name); - ckp->wmem_warn = true; - } else - LOGDEBUG("Increased sendbufsize to %d of desired %d", opt, len); - return opt; -} - -int set_recvbufsize(ckpool_t *ckp, const int fd, const int len) -{ - socklen_t optlen; - int opt; - - optlen = sizeof(opt); - opt = len * 4 / 3; - setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, optlen); - getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, &optlen); - opt /= 2; - if (opt < len) { - LOGDEBUG("Failed to set desired rcvbufsiz of %d unprivileged, only got %d", - len, opt); - optlen = sizeof(opt); - opt = len * 4 / 3; - setsockopt(fd, SOL_SOCKET, SO_RCVBUFFORCE, &opt, optlen); - getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &opt, &optlen); - opt /= 2; - } - if (opt < len) { - LOGNOTICE("Failed to increase rcvbufsiz to %d, increase rmem_max or start %s privileged if using a remote btcd", - len, ckp->name); - ckp->rmem_warn = true; - } else - LOGDEBUG("Increased rcvbufsiz to %d of desired %d", opt, len); - return opt; -} - -/* If there is any cs->buflen it implies a full line was received on the last - * pass through read_socket_line and subsequently processed, leaving - * unprocessed data beyond cs->bufofs. Otherwise a zero buflen means there is - * only unprocessed data of bufofs length. 
*/ -static void clear_bufline(connsock_t *cs) -{ - if (unlikely(!cs->buf)) { - socklen_t optlen = sizeof(cs->rcvbufsiz); - - cs->buf = ckzalloc(PAGESIZE); - cs->bufsize = PAGESIZE; - getsockopt(cs->fd, SOL_SOCKET, SO_RCVBUF, &cs->rcvbufsiz, &optlen); - cs->rcvbufsiz /= 2; - LOGDEBUG("connsock rcvbufsiz detected as %d", cs->rcvbufsiz); - } else if (cs->buflen) { - memmove(cs->buf, cs->buf + cs->bufofs, cs->buflen); - memset(cs->buf + cs->buflen, 0, cs->bufofs); - cs->bufofs = cs->buflen; - cs->buflen = 0; - cs->buf[cs->bufofs] = '\0'; - } -} - -static void add_buflen(ckpool_t *ckp, connsock_t *cs, const char *readbuf, const int len) -{ - int backoff = 1; - int buflen; - - buflen = round_up_page(cs->bufofs + len + 1); - while (cs->bufsize < buflen) { - char *newbuf = realloc(cs->buf, buflen); - - if (likely(newbuf)) { - cs->bufsize = buflen; - cs->buf = newbuf; - break; - } - if (backoff == 1) - fprintf(stderr, "Failed to realloc %d in read_socket_line, retrying\n", (int)buflen); - cksleep_ms(backoff); - backoff <<= 1; - } - /* Increase receive buffer if possible to larger than the largest - * message we're likely to buffer */ - if (unlikely(!ckp->rmem_warn && buflen > cs->rcvbufsiz)) - cs->rcvbufsiz = set_recvbufsize(ckp, cs->fd, buflen); - - memcpy(cs->buf + cs->bufofs, readbuf, len); - cs->bufofs += len; - cs->buf[cs->bufofs] = '\0'; -} - -/* Receive as much data is currently available without blocking into a connsock - * buffer. Returns total length of data read. */ -static int recv_available(ckpool_t *ckp, connsock_t *cs) -{ - char readbuf[PAGESIZE]; - int len = 0, ret; - - do { - ret = recv(cs->fd, readbuf, PAGESIZE - 4, MSG_DONTWAIT); - if (ret > 0) { - add_buflen(ckp, cs, readbuf, ret); - len += ret; - } - } while (ret > 0); - - return len; -} - -/* Read from a socket into cs->buf till we get an '\n', converting it to '\0' - * and storing how much extra data we've received, to be moved to the beginning - * of the buffer for use on the next receive. 
Returns length of the line if a - * whole line is received, zero if none/some data is received without an EOL - * and -1 on error. */ -int read_socket_line(connsock_t *cs, float *timeout) -{ - ckpool_t *ckp = cs->ckp; - bool quiet = ckp->proxy | ckp->remote; - char *eom = NULL; - tv_t start, now; - float diff; - int ret; - - clear_bufline(cs); - recv_available(ckp, cs); // Intentionally ignore return value - eom = memchr(cs->buf, '\n', cs->bufofs); - - tv_time(&start); - - while (!eom) { - if (unlikely(cs->fd < 0)) { - ret = -1; - goto out; - } - - if (*timeout < 0) { - if (quiet) - LOGINFO("Timed out in read_socket_line"); - else - LOGERR("Timed out in read_socket_line"); - ret = 0; - goto out; - } - ret = wait_read_select(cs->fd, *timeout); - if (ret < 1) { - if (quiet) - LOGINFO("Select %s in read_socket_line", !ret ? "timed out" : "failed"); - else - LOGERR("Select %s in read_socket_line", !ret ? "timed out" : "failed"); - goto out; - } - ret = recv_available(ckp, cs); - if (ret < 1) { - /* If we have done wait_read_select there should be - * something to read and if we get nothing it means the - * socket is closed. */ - if (quiet) - LOGINFO("Failed to recv in read_socket_line"); - else - LOGERR("Failed to recv in read_socket_line"); - ret = -1; - goto out; - } - eom = memchr(cs->buf, '\n', cs->bufofs); - tv_time(&now); - diff = tvdiff(&now, &start); - copy_tv(&start, &now); - *timeout -= diff; - } - ret = eom - cs->buf; - - cs->buflen = cs->buf + cs->bufofs - eom - 1; - if (cs->buflen) - cs->bufofs = eom - cs->buf + 1; - else - cs->bufofs = 0; - *eom = '\0'; -out: - if (ret < 0) { - empty_buffer(cs); - dealloc(cs->buf); - } - return ret; -} - -/* We used to send messages between each proc_instance via unix sockets when - * ckpool was a multi-process model but that is no longer required so we can - * place the messages directly on the other proc_instance's queue until we - * deprecate this mechanism. 
*/ -void _queue_proc(proc_instance_t *pi, const char *msg, const char *file, const char *func, const int line) -{ - unix_msg_t *umsg; - - if (unlikely(!msg || !strlen(msg))) { - LOGWARNING("Null msg passed to queue_proc from %s %s:%d", file, func, line); - return; - } - umsg = ckalloc(sizeof(unix_msg_t)); - umsg->sockd = -1; - umsg->buf = strdup(msg); - - mutex_lock(&pi->rmsg_lock); - DL_APPEND(pi->unix_msgs, umsg); - pthread_cond_signal(&pi->rmsg_cond); - mutex_unlock(&pi->rmsg_lock); -} - -/* Send a single message to a process instance and retrieve the response, then - * close the socket. */ -char *_send_recv_proc(const proc_instance_t *pi, const char *msg, int writetimeout, int readtimedout, - const char *file, const char *func, const int line) -{ - char *path = pi->us.path, *buf = NULL; - int sockd; - - if (unlikely(!path || !strlen(path))) { - LOGERR("Attempted to send message %s to null path in send_proc", msg ? msg : ""); - goto out; - } - if (unlikely(!msg || !strlen(msg))) { - LOGERR("Attempted to send null message to socket %s in send_proc", path); - goto out; - } - sockd = open_unix_client(path); - if (unlikely(sockd < 0)) { - LOGWARNING("Failed to open socket %s in send_recv_proc", path); - goto out; - } - if (unlikely(!_send_unix_msg(sockd, msg, writetimeout, file, func, line))) - LOGWARNING("Failed to send %s to socket %s", msg, path); - else - buf = _recv_unix_msg(sockd, readtimedout, readtimedout, file, func, line); - Close(sockd); -out: - if (unlikely(!buf)) - LOGERR("Failure in send_recv_proc from %s %s:%d", file, func, line); - return buf; -} - -static const char *rpc_method(const char *rpc_req) -{ - const char *ptr = strchr(rpc_req, ':'); - if (ptr) - return ptr+1; - return rpc_req; -} - -/* All of these calls are made to bitcoind which prefers open/close instead - * of persistent connections so cs->fd is always invalid. 
*/ -static json_t *_json_rpc_call(connsock_t *cs, const char *rpc_req, const bool info_only) -{ - float timeout = RPC_TIMEOUT; - char *http_req = NULL; - json_error_t err_val; - char *warning = NULL; - json_t *val = NULL; - tv_t stt_tv, fin_tv; - double elapsed; - int len, ret; - - /* Serialise all calls in case we use cs from multiple threads */ - cksem_wait(&cs->sem); - cs->fd = connect_socket(cs->url, cs->port); - if (unlikely(cs->fd < 0)) { - ASPRINTF(&warning, "Unable to connect socket to %s:%s in %s", cs->url, cs->port, __func__); - goto out; - } - if (unlikely(!cs->url)) { - ASPRINTF(&warning, "No URL in %s", __func__); - goto out; - } - if (unlikely(!cs->port)) { - ASPRINTF(&warning, "No port in %s", __func__); - goto out; - } - if (unlikely(!cs->auth)) { - ASPRINTF(&warning, "No auth in %s", __func__); - goto out; - } - if (unlikely(!rpc_req)) { - ASPRINTF(&warning, "Null rpc_req passed to %s", __func__); - goto out; - } - len = strlen(rpc_req); - if (unlikely(!len)) { - ASPRINTF(&warning, "Zero length rpc_req passed to %s", __func__); - goto out; - } - http_req = ckalloc(len + 256); // Leave room for headers - sprintf(http_req, - "POST / HTTP/1.1\n" - "Authorization: Basic %s\n" - "Host: %s:%s\n" - "Content-type: application/json\n" - "Content-Length: %d\n\n%s", - cs->auth, cs->url, cs->port, len, rpc_req); - - len = strlen(http_req); - tv_time(&stt_tv); - ret = write_socket(cs->fd, http_req, len); - if (ret != len) { - tv_time(&fin_tv); - elapsed = tvdiff(&fin_tv, &stt_tv); - ASPRINTF(&warning, "Failed to write to socket in %s (%.10s...) %.3fs", - __func__, rpc_method(rpc_req), elapsed); - goto out_empty; - } - ret = read_socket_line(cs, &timeout); - if (ret < 1) { - tv_time(&fin_tv); - elapsed = tvdiff(&fin_tv, &stt_tv); - ASPRINTF(&warning, "Failed to read socket line in %s (%.10s...) 
%.3fs", - __func__, rpc_method(rpc_req), elapsed); - goto out_empty; - } - if (strncasecmp(cs->buf, "HTTP/1.1 200 OK", 15)) { - tv_time(&fin_tv); - elapsed = tvdiff(&fin_tv, &stt_tv); - ASPRINTF(&warning, "HTTP response to (%.10s...) %.3fs not ok: %s", - rpc_method(rpc_req), elapsed, cs->buf); - timeout = 0; - /* Look for a json response if there is one */ - while (read_socket_line(cs, &timeout) > 0) { - timeout = 0; - if (*cs->buf != '{') - continue; - free(warning); - /* Replace the warning with the json response */ - ASPRINTF(&warning, "JSON response to (%.10s...) %.3fs not ok: %s", - rpc_method(rpc_req), elapsed, cs->buf); - break; - } - goto out_empty; - } - do { - ret = read_socket_line(cs, &timeout); - if (ret < 1) { - tv_time(&fin_tv); - elapsed = tvdiff(&fin_tv, &stt_tv); - ASPRINTF(&warning, "Failed to read http socket lines in %s (%.10s...) %.3fs", - __func__, rpc_method(rpc_req), elapsed); - goto out_empty; - } - } while (strncmp(cs->buf, "{", 1)); - tv_time(&fin_tv); - elapsed = tvdiff(&fin_tv, &stt_tv); - if (elapsed > 5.0) { - ASPRINTF(&warning, "HTTP socket read+write took %.3fs in %s (%.10s...)", - elapsed, __func__, rpc_method(rpc_req)); - } - - val = json_loads(cs->buf, 0, &err_val); - if (!val) { - ASPRINTF(&warning, "JSON decode (%.10s...) failed(%d): %s", - rpc_method(rpc_req), err_val.line, err_val.text); - } -out_empty: - empty_socket(cs->fd); - empty_buffer(cs); -out: - if (warning) { - if (info_only) - LOGINFO("%s", warning); - else - LOGWARNING("%s", warning); - free(warning); - } - Close(cs->fd); - free(http_req); - dealloc(cs->buf); - cksem_post(&cs->sem); - return val; -} - -json_t *json_rpc_call(connsock_t *cs, const char *rpc_req) -{ - return _json_rpc_call(cs, rpc_req, false); -} - -json_t *json_rpc_response(connsock_t *cs, const char *rpc_req) -{ - return _json_rpc_call(cs, rpc_req, true); -} - -/* For when we are submitting information that is not important and don't care - * about the response. 
*/ -void json_rpc_msg(connsock_t *cs, const char *rpc_req) -{ - json_t *val = _json_rpc_call(cs, rpc_req, true); - - /* We don't care about the result */ - json_decref(val); -} - -static void terminate_oldpid(const ckpool_t *ckp, proc_instance_t *pi, const pid_t oldpid) -{ - if (!ckp->killold) { - quit(1, "Process %s pid %d still exists, start ckpool with -H to get a handover or -k if you wish to kill it", - pi->processname, oldpid); - } - LOGNOTICE("Terminating old process %s pid %d", pi->processname, oldpid); - if (kill_pid(oldpid, 15)) - quit(1, "Unable to kill old process %s pid %d", pi->processname, oldpid); - LOGWARNING("Terminating old process %s pid %d", pi->processname, oldpid); - if (pid_wait(oldpid, 500)) - return; - LOGWARNING("Old process %s pid %d failed to respond to terminate request, killing", - pi->processname, oldpid); - if (kill_pid(oldpid, 9) || !pid_wait(oldpid, 3000)) - quit(1, "Unable to kill old process %s pid %d", pi->processname, oldpid); -} - -/* This is for blocking sends of json messages */ -bool _send_json_msg(connsock_t *cs, const json_t *json_msg, const char *file, const char *func, const int line) -{ - bool ret = false; - int len, sent; - char *s; - - if (unlikely(!json_msg)) { - LOGWARNING("Empty json msg in send_json_msg from %s %s:%d", file, func, line); - goto out; - } - s = json_dumps(json_msg, JSON_ESCAPE_SLASH | JSON_EOL); - if (unlikely(!s)) { - LOGWARNING("Empty json dump in send_json_msg from %s %s:%d", file, func, line); - goto out; - } - LOGDEBUG("Sending json msg: %s", s); - len = strlen(s); - if (unlikely(!len)) { - LOGWARNING("Zero length string in send_json_msg from %s %s:%d", file, func, line); - goto out; - } - sent = write_socket(cs->fd, s, len); - dealloc(s); - if (sent != len) { - LOGNOTICE("Failed to send %d bytes sent %d in send_json_msg", len, sent); - goto out; - } - ret = true; -out: - return ret; -} - -/* Decode a string that should have a json message and return just the contents - * of the result key or 
NULL. */ -static json_t *json_result(json_t *val) -{ - json_t *res_val = NULL, *err_val; - - res_val = json_object_get(val, "result"); - /* (null) is a valid result while no value is an error, so mask out - * (null) and only handle lack of result */ - if (json_is_null(res_val)) - res_val = NULL; - else if (!res_val) { - char *ss; - - err_val = json_object_get(val, "error"); - if (err_val) - ss = json_dumps(err_val, 0); - else - ss = strdup("(unknown reason)"); - - LOGNOTICE("JSON-RPC decode of json_result failed: %s", ss); - free(ss); - } - return res_val; -} - -/* Return the error value if one exists */ -static json_t *json_errval(json_t *val) -{ - json_t *err_val = json_object_get(val, "error"); - - return err_val; -} - -/* Parse a string and return the json value it contains, if any, and the - * result in res_val. Return NULL if no result key is found. */ -json_t *json_msg_result(const char *msg, json_t **res_val, json_t **err_val) -{ - json_error_t err; - json_t *val; - - *res_val = NULL; - val = json_loads(msg, 0, &err); - if (!val) { - LOGWARNING("Json decode failed(%d): %s", err.line, err.text); - goto out; - } - *res_val = json_result(val); - *err_val = json_errval(val); - -out: - return val; -} - -/* Open the file in path, check if there is a pid in there that still exists - * and if not, write the pid into that file. 
*/ -static bool write_pid(ckpool_t *ckp, const char *path, proc_instance_t *pi, const pid_t pid, const pid_t oldpid) -{ - FILE *fp; - - if (ckp->handover && oldpid && !pid_wait(oldpid, 500)) { - LOGWARNING("Old process pid %d failed to shutdown cleanly, terminating", oldpid); - terminate_oldpid(ckp, pi, oldpid); - } - - fp = fopen(path, "we"); - if (!fp) { - LOGERR("Failed to open file %s", path); - return false; - } - fprintf(fp, "%d", pid); - fclose(fp); - - return true; -} - -static void name_process_sockname(unixsock_t *us, const proc_instance_t *pi) -{ - us->path = strdup(pi->ckp->socket_dir); - realloc_strcat(&us->path, pi->sockname); -} - -static void open_process_sock(ckpool_t *ckp, const proc_instance_t *pi, unixsock_t *us) -{ - LOGDEBUG("Opening %s", us->path); - us->sockd = open_unix_server(us->path); - if (unlikely(us->sockd < 0)) - quit(1, "Failed to open %s socket", pi->sockname); - if (chown(us->path, -1, ckp->gr_gid)) - quit(1, "Failed to set %s to group id %d", us->path, ckp->gr_gid); -} - -static void create_process_unixsock(proc_instance_t *pi) -{ - unixsock_t *us = &pi->us; - ckpool_t *ckp = pi->ckp; - - name_process_sockname(us, pi); - open_process_sock(ckp, pi, us); -} - -static void write_namepid(proc_instance_t *pi) -{ - char s[256]; - - pi->pid = getpid(); - sprintf(s, "%s%s.pid", pi->ckp->socket_dir, pi->processname); - if (!write_pid(pi->ckp, s, pi, pi->pid, pi->oldpid)) - quit(1, "Failed to write %s pid %d", pi->processname, pi->pid); -} - -static void rm_namepid(const proc_instance_t *pi) -{ - char s[256]; - - sprintf(s, "%s%s.pid", pi->ckp->socket_dir, pi->processname); - unlink(s); -} - -static void launch_logger(ckpool_t *ckp) -{ - ckp->logger = create_ckmsgq(ckp, "logger", &proclog); - ckp->console_logger = create_ckmsgq(ckp, "conlog", &console_log); -} - -static void clean_up(ckpool_t *ckp) -{ - rm_namepid(&ckp->main); - dealloc(ckp->socket_dir); -} - -static void cancel_pthread(pthread_t *pth) -{ - if (!pth || !*pth) - return; - 
pthread_cancel(*pth); - pth = NULL; -} - -static void sighandler(const int sig) -{ - ckpool_t *ckp = global_ckp; - - signal(sig, SIG_IGN); - signal(SIGTERM, SIG_IGN); - LOGWARNING("Process %s received signal %d, shutting down", - ckp->name, sig); - - cancel_pthread(&ckp->pth_listener); - exit(0); -} - -static bool _json_get_string(char **store, const json_t *entry, const char *res) -{ - bool ret = false; - const char *buf; - - *store = NULL; - if (!entry || json_is_null(entry)) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_string(entry)) { - LOGWARNING("Json entry %s is not a string", res); - goto out; - } - buf = json_string_value(entry); - LOGDEBUG("Json found entry %s: %s", res, buf); - *store = strdup(buf); - ret = true; -out: - return ret; -} - -bool json_get_string(char **store, const json_t *val, const char *res) -{ - return _json_get_string(store, json_object_get(val, res), res); -} - -/* Used when there must be a valid string */ -static void json_get_configstring(char **store, const json_t *val, const char *res) -{ - bool ret = _json_get_string(store, json_object_get(val, res), res); - - if (!ret) { - LOGEMERG("Invalid config string or missing object for %s", res); - exit(1); - } -} - -bool json_get_int64(int64_t *store, const json_t *val, const char *res) -{ - json_t *entry = json_object_get(val, res); - bool ret = false; - - if (!entry) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_integer(entry)) { - LOGINFO("Json entry %s is not an integer", res); - goto out; - } - *store = json_integer_value(entry); - LOGDEBUG("Json found entry %s: %"PRId64, res, *store); - ret = true; -out: - return ret; -} - -bool json_get_int(int *store, const json_t *val, const char *res) -{ - json_t *entry = json_object_get(val, res); - bool ret = false; - - if (!entry) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_integer(entry)) { - LOGWARNING("Json entry %s is not an integer", 
res); - goto out; - } - *store = json_integer_value(entry); - LOGDEBUG("Json found entry %s: %d", res, *store); - ret = true; -out: - return ret; -} - -bool json_get_double(double *store, const json_t *val, const char *res) -{ - json_t *entry = json_object_get(val, res); - bool ret = false; - - if (!entry) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_real(entry)) { - LOGWARNING("Json entry %s is not a double", res); - goto out; - } - *store = json_real_value(entry); - LOGDEBUG("Json found entry %s: %f", res, *store); - ret = true; -out: - return ret; -} - -bool json_get_uint32(uint32_t *store, const json_t *val, const char *res) -{ - json_t *entry = json_object_get(val, res); - bool ret = false; - - if (!entry) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_integer(entry)) { - LOGWARNING("Json entry %s is not an integer", res); - goto out; - } - *store = json_integer_value(entry); - LOGDEBUG("Json found entry %s: %u", res, *store); - ret = true; -out: - return ret; -} - -bool json_get_bool(bool *store, const json_t *val, const char *res) -{ - json_t *entry = json_object_get(val, res); - bool ret = false; - - if (!entry) { - LOGDEBUG("Json did not find entry %s", res); - goto out; - } - if (!json_is_boolean(entry)) { - LOGINFO("Json entry %s is not a boolean", res); - goto out; - } - *store = json_is_true(entry); - LOGDEBUG("Json found entry %s: %s", res, *store ? 
"true" : "false"); - ret = true; -out: - return ret; -} - -bool json_getdel_int(int *store, json_t *val, const char *res) -{ - bool ret; - - ret = json_get_int(store, val, res); - if (ret) - json_object_del(val, res); - return ret; -} - -bool json_getdel_int64(int64_t *store, json_t *val, const char *res) -{ - bool ret; - - ret = json_get_int64(store, val, res); - if (ret) - json_object_del(val, res); - return ret; -} - -static void parse_btcds(ckpool_t *ckp, const json_t *arr_val, const int arr_size) -{ - json_t *val; - int i; - - ckp->btcds = arr_size; - ckp->btcdurl = ckzalloc(sizeof(char *) * arr_size); - ckp->btcdauth = ckzalloc(sizeof(char *) * arr_size); - ckp->btcdpass = ckzalloc(sizeof(char *) * arr_size); - ckp->btcdnotify = ckzalloc(sizeof(bool *) * arr_size); - for (i = 0; i < arr_size; i++) { - val = json_array_get(arr_val, i); - json_get_configstring(&ckp->btcdurl[i], val, "url"); - json_get_configstring(&ckp->btcdauth[i], val, "auth"); - json_get_configstring(&ckp->btcdpass[i], val, "pass"); - json_get_bool(&ckp->btcdnotify[i], val, "notify"); - } -} - -static void parse_proxies(ckpool_t *ckp, const json_t *arr_val, const int arr_size) -{ - json_t *val; - int i; - - ckp->proxies = arr_size; - ckp->proxyurl = ckzalloc(sizeof(char *) * arr_size); - ckp->proxyauth = ckzalloc(sizeof(char *) * arr_size); - ckp->proxypass = ckzalloc(sizeof(char *) * arr_size); - for (i = 0; i < arr_size; i++) { - val = json_array_get(arr_val, i); - json_get_configstring(&ckp->proxyurl[i], val, "url"); - json_get_configstring(&ckp->proxyauth[i], val, "auth"); - if (!json_get_string(&ckp->proxypass[i], val, "pass")) - ckp->proxypass[i] = strdup(""); - } -} - -static bool parse_serverurls(ckpool_t *ckp, const json_t *arr_val) -{ - bool ret = false; - int arr_size, i; - - if (!arr_val) - goto out; - if (!json_is_array(arr_val)) { - LOGINFO("Unable to parse serverurl entries as an array"); - goto out; - } - arr_size = json_array_size(arr_val); - if (!arr_size) { - 
LOGWARNING("Serverurl array empty"); - goto out; - } - ckp->serverurls = arr_size; - ckp->serverurl = ckalloc(sizeof(char *) * arr_size); - ckp->server_highdiff = ckzalloc(sizeof(bool) * arr_size); - ckp->nodeserver = ckzalloc(sizeof(bool) * arr_size); - ckp->trusted = ckzalloc(sizeof(bool) * arr_size); - for (i = 0; i < arr_size; i++) { - json_t *val = json_array_get(arr_val, i); - - if (!_json_get_string(&ckp->serverurl[i], val, "serverurl")) - LOGWARNING("Invalid serverurl entry number %d", i); - } - ret = true; -out: - return ret; -} - -static void parse_nodeservers(ckpool_t *ckp, const json_t *arr_val) -{ - int arr_size, i, j, total_urls; - - if (!arr_val) - return; - if (!json_is_array(arr_val)) { - LOGWARNING("Unable to parse nodeservers entries as an array"); - return; - } - arr_size = json_array_size(arr_val); - if (!arr_size) { - LOGWARNING("Nodeserver array empty"); - return; - } - total_urls = ckp->serverurls + arr_size; - ckp->serverurl = realloc(ckp->serverurl, sizeof(char *) * total_urls); - ckp->nodeserver = realloc(ckp->nodeserver, sizeof(bool) * total_urls); - ckp->trusted = realloc(ckp->trusted, sizeof(bool) * total_urls); - for (i = 0, j = ckp->serverurls; j < total_urls; i++, j++) { - json_t *val = json_array_get(arr_val, i); - - if (!_json_get_string(&ckp->serverurl[j], val, "nodeserver")) - LOGWARNING("Invalid nodeserver entry number %d", i); - ckp->nodeserver[j] = true; - ckp->nodeservers++; - } - ckp->serverurls = total_urls; -} - -static void parse_trusted(ckpool_t *ckp, const json_t *arr_val) -{ - int arr_size, i, j, total_urls; - - if (!arr_val) - return; - if (!json_is_array(arr_val)) { - LOGWARNING("Unable to parse trusted server entries as an array"); - return; - } - arr_size = json_array_size(arr_val); - if (!arr_size) { - LOGWARNING("Trusted array empty"); - return; - } - total_urls = ckp->serverurls + arr_size; - ckp->serverurl = realloc(ckp->serverurl, sizeof(char *) * total_urls); - ckp->nodeserver = realloc(ckp->nodeserver, 
sizeof(bool) * total_urls); - ckp->trusted = realloc(ckp->trusted, sizeof(bool) * total_urls); - for (i = 0, j = ckp->serverurls; j < total_urls; i++, j++) { - json_t *val = json_array_get(arr_val, i); - - if (!_json_get_string(&ckp->serverurl[j], val, "trusted")) - LOGWARNING("Invalid trusted server entry number %d", i); - ckp->trusted[j] = true; - } - ckp->serverurls = total_urls; -} - - -static bool parse_redirecturls(ckpool_t *ckp, const json_t *arr_val) -{ - bool ret = false; - int arr_size, i; - char *redirecturl, url[INET6_ADDRSTRLEN], port[8]; - redirecturl = alloca(INET6_ADDRSTRLEN); - - if (!arr_val) - goto out; - if (!json_is_array(arr_val)) { - LOGNOTICE("Unable to parse redirecturl entries as an array"); - goto out; - } - arr_size = json_array_size(arr_val); - if (!arr_size) { - LOGWARNING("redirecturl array empty"); - goto out; - } - ckp->redirecturls = arr_size; - ckp->redirecturl = ckalloc(sizeof(char *) * arr_size); - ckp->redirectport = ckalloc(sizeof(char *) * arr_size); - for (i = 0; i < arr_size; i++) { - json_t *val = json_array_get(arr_val, i); - - strncpy(redirecturl, json_string_value(val), INET6_ADDRSTRLEN - 1); - /* See that the url properly resolves */ - if (!url_from_serverurl(redirecturl, url, port)) - quit(1, "Invalid redirecturl entry %d %s", i, redirecturl); - ckp->redirecturl[i] = strdup(strsep(&redirecturl, ":")); - ckp->redirectport[i] = strdup(port); - } - ret = true; -out: - return ret; -} - - -static void parse_config(ckpool_t *ckp) -{ - json_t *json_conf, *arr_val; - json_error_t err_val; - char *url, *vmask; - int arr_size; - - json_conf = json_load_file(ckp->config, JSON_DISABLE_EOF_CHECK, &err_val); - if (!json_conf) { - LOGWARNING("Json decode error for config file %s: (%d): %s", ckp->config, - err_val.line, err_val.text); - return; - } - arr_val = json_object_get(json_conf, "btcd"); - if (arr_val && json_is_array(arr_val)) { - arr_size = json_array_size(arr_val); - if (arr_size) - parse_btcds(ckp, arr_val, arr_size); - } 
- json_get_string(&ckp->btcaddress, json_conf, "btcaddress"); - json_get_string(&ckp->btcsig, json_conf, "btcsig"); - if (ckp->btcsig && strlen(ckp->btcsig) > 38) { - LOGWARNING("Signature %s too long, truncating to 38 bytes", ckp->btcsig); - ckp->btcsig[38] = '\0'; - } - json_get_int(&ckp->blockpoll, json_conf, "blockpoll"); - json_get_int(&ckp->nonce1length, json_conf, "nonce1length"); - json_get_int(&ckp->nonce2length, json_conf, "nonce2length"); - json_get_int(&ckp->update_interval, json_conf, "update_interval"); - json_get_string(&vmask, json_conf, "version_mask"); - if (vmask && strlen(vmask) && validhex(vmask)) - sscanf(vmask, "%x", &ckp->version_mask); - else - ckp->version_mask = 0x1fffe000; - /* Look for an array first and then a single entry */ - arr_val = json_object_get(json_conf, "serverurl"); - if (!parse_serverurls(ckp, arr_val)) { - if (json_get_string(&url, json_conf, "serverurl")) { - ckp->serverurl = ckalloc(sizeof(char *)); - ckp->serverurl[0] = url; - ckp->serverurls = 1; - } - } - arr_val = json_object_get(json_conf, "nodeserver"); - parse_nodeservers(ckp, arr_val); - arr_val = json_object_get(json_conf, "trusted"); - parse_trusted(ckp, arr_val); - json_get_string(&ckp->upstream, json_conf, "upstream"); - json_get_double(&ckp->mindiff, json_conf, "mindiff"); - json_get_double(&ckp->startdiff, json_conf, "startdiff"); - json_get_double(&ckp->highdiff, json_conf, "highdiff"); - json_get_double(&ckp->maxdiff, json_conf, "maxdiff"); - json_get_string(&ckp->logdir, json_conf, "logdir"); - json_get_int(&ckp->maxclients, json_conf, "maxclients"); - json_get_double(&ckp->donation, json_conf, "donation"); - /* Avoid dust-sized donations */ - if (ckp->donation < 0.1) - ckp->donation = 0; - else if (ckp->donation > 99.9) - ckp->donation = 99.9; - arr_val = json_object_get(json_conf, "proxy"); - if (arr_val && json_is_array(arr_val)) { - arr_size = json_array_size(arr_val); - if (arr_size) - parse_proxies(ckp, arr_val, arr_size); - } - arr_val = 
json_object_get(json_conf, "redirecturl"); - if (arr_val) - parse_redirecturls(ckp, arr_val); - json_get_string(&ckp->zmqblock, json_conf, "zmqblock"); - - json_decref(json_conf); -} - -static void manage_old_instance(ckpool_t *ckp, proc_instance_t *pi) -{ - struct stat statbuf; - char path[256]; - FILE *fp; - - sprintf(path, "%s%s.pid", pi->ckp->socket_dir, pi->processname); - if (!stat(path, &statbuf)) { - int oldpid, ret; - - LOGNOTICE("File %s exists", path); - fp = fopen(path, "re"); - if (!fp) - quit(1, "Failed to open file %s", path); - ret = fscanf(fp, "%d", &oldpid); - fclose(fp); - if (ret == 1 && !(kill_pid(oldpid, 0))) { - LOGNOTICE("Old process %s pid %d still exists", pi->processname, oldpid); - if (ckp->handover) { - LOGINFO("Saving pid to be handled at handover"); - pi->oldpid = oldpid; - return; - } - terminate_oldpid(ckp, pi, oldpid); - } - } -} - -static void prepare_child(ckpool_t *ckp, proc_instance_t *pi, void *process, char *name) -{ - pi->ckp = ckp; - pi->processname = name; - pi->sockname = pi->processname; - create_process_unixsock(pi); - create_pthread(&pi->pth_process, process, pi); - create_unix_receiver(pi); -} - -static struct option long_options[] = { - {"btcsolo", no_argument, 0, 'B'}, - {"config", required_argument, 0, 'c'}, - {"daemonise", no_argument, 0, 'D'}, - {"group", required_argument, 0, 'g'}, - {"handover", no_argument, 0, 'H'}, - {"help", no_argument, 0, 'h'}, - {"killold", no_argument, 0, 'k'}, - {"log-shares", no_argument, 0, 'L'}, - {"loglevel", required_argument, 0, 'l'}, - {"name", required_argument, 0, 'n'}, - {"node", no_argument, 0, 'N'}, - {"passthrough", no_argument, 0, 'P'}, - {"proxy", no_argument, 0, 'p'}, - {"quiet", no_argument, 0, 'q'}, - {"redirector", no_argument, 0, 'R'}, - {"sockdir", required_argument, 0, 's'}, - {"trusted", no_argument, 0, 't'}, - {"userproxy", no_argument, 0, 'u'}, - {0, 0, 0, 0} -}; - -static bool send_recv_path(const char *path, const char *msg) -{ - int sockd = 
open_unix_client(path); - bool ret = false; - char *response; - - send_unix_msg(sockd, msg); - response = recv_unix_msg(sockd); - if (response) { - ret = true; - LOGWARNING("Received: %s in response to %s request", response, msg); - dealloc(response); - } else - LOGWARNING("Received no response to %s request", msg); - Close(sockd); - return ret; -} - -int main(int argc, char **argv) -{ - struct sigaction handler; - int c, ret, i = 0, j; - char buf[512] = {}; - char *appname; - ckpool_t ckp; - - /* Make significant floating point errors fatal to avoid subtle bugs being missed */ - feenableexcept(FE_DIVBYZERO | FE_INVALID); - json_set_alloc_funcs(json_ckalloc, free); - - global_ckp = &ckp; - memset(&ckp, 0, sizeof(ckp)); - ckp.starttime = time(NULL); - ckp.startpid = getpid(); - ckp.loglevel = LOG_NOTICE; - ckp.initial_args = ckalloc(sizeof(char *) * (argc + 2)); /* Leave room for extra -H */ - for (ckp.args = 0; ckp.args < argc; ckp.args++) - ckp.initial_args[ckp.args] = strdup(argv[ckp.args]); - ckp.initial_args[ckp.args] = NULL; - - appname = basename(argv[0]); - if (!strcmp(appname, "ckproxy")) - ckp.proxy = true; - - while ((c = getopt_long(argc, argv, "Bc:Dd:g:HhkLl:Nn:PpqRS:s:tu", long_options, &i)) != -1) { - switch (c) { - case 'B': - if (ckp.proxy) - quit(1, "Cannot set both proxy and btcsolo mode"); - ckp.btcsolo = true; - break; - case 'c': - ckp.config = optarg; - break; - case 'D': - ckp.daemon = true; - break; - case 'g': - ckp.grpnam = optarg; - break; - case 'H': - ckp.handover = true; - ckp.killold = true; - break; - case 'h': - for (j = 0; long_options[j].val; j++) { - struct option *jopt = &long_options[j]; - - if (jopt->has_arg) { - char *upper = alloca(strlen(jopt->name) + 1); - int offset = 0; - - do { - upper[offset] = toupper(jopt->name[offset]); - } while (upper[offset++] != '\0'); - printf("-%c %s | --%s %s\n", jopt->val, - upper, jopt->name, upper); - } else - printf("-%c | --%s\n", jopt->val, jopt->name); - } - exit(0); - case 'k': - 
ckp.killold = true; - break; - case 'L': - ckp.logshares = true; - break; - case 'l': - ckp.loglevel = atoi(optarg); - if (ckp.loglevel < LOG_EMERG || ckp.loglevel > LOG_DEBUG) { - quit(1, "Invalid loglevel (range %d - %d): %d", - LOG_EMERG, LOG_DEBUG, ckp.loglevel); - } - break; - case 'N': - if (ckp.proxy || ckp.redirector || ckp.userproxy || ckp.passthrough) - quit(1, "Cannot set another proxy type or redirector and node mode"); - ckp.proxy = ckp.passthrough = ckp.node = true; - break; - case 'n': - ckp.name = optarg; - break; - case 'P': - if (ckp.proxy || ckp.redirector || ckp.userproxy || ckp.node) - quit(1, "Cannot set another proxy type or redirector and passthrough mode"); - ckp.proxy = ckp.passthrough = true; - break; - case 'p': - if (ckp.passthrough || ckp.redirector || ckp.userproxy || ckp.node) - quit(1, "Cannot set another proxy type or redirector and proxy mode"); - ckp.proxy = true; - break; - case 'q': - ckp.quiet = true; - break; - case 'R': - if (ckp.proxy || ckp.passthrough || ckp.userproxy || ckp.node) - quit(1, "Cannot set a proxy type or passthrough and redirector modes"); - ckp.proxy = ckp.passthrough = ckp.redirector = true; - break; - case 's': - ckp.socket_dir = strdup(optarg); - break; - case 't': - if (ckp.proxy) - quit(1, "Cannot set a proxy type and trusted remote mode"); - ckp.remote = true; - break; - case 'u': - if (ckp.proxy || ckp.redirector || ckp.passthrough || ckp.node) - quit(1, "Cannot set both userproxy and another proxy type or redirector"); - ckp.userproxy = ckp.proxy = true; - break; - } - } - - if (!ckp.name) { - if (ckp.node) - ckp.name = "cknode"; - else if (ckp.redirector) - ckp.name = "ckredirector"; - else if (ckp.passthrough) - ckp.name = "ckpassthrough"; - else if (ckp.proxy) - ckp.name = "ckproxy"; - else - ckp.name = "ckpool"; - } - snprintf(buf, 15, "%s", ckp.name); - prctl(PR_SET_NAME, buf, 0, 0, 0); - memset(buf, 0, 15); - - if (ckp.grpnam) { - struct group *group = getgrnam(ckp.grpnam); - - if (!group) - 
quit(1, "Failed to find group %s", ckp.grpnam); - ckp.gr_gid = group->gr_gid; - } else - ckp.gr_gid = getegid(); - - if (!ckp.config) { - ckp.config = strdup(ckp.name); - realloc_strcat(&ckp.config, ".conf"); - } - if (!ckp.socket_dir) { - ckp.socket_dir = strdup("/tmp/"); - realloc_strcat(&ckp.socket_dir, ckp.name); - } - trail_slash(&ckp.socket_dir); - - /* Ignore sigpipe */ - signal(SIGPIPE, SIG_IGN); - - ret = mkdir(ckp.socket_dir, 0750); - if (ret && errno != EEXIST) - quit(1, "Failed to make directory %s", ckp.socket_dir); - - parse_config(&ckp); - /* Set defaults if not found in config file */ - if (!ckp.btcds) { - ckp.btcds = 1; - ckp.btcdurl = ckzalloc(sizeof(char *)); - ckp.btcdauth = ckzalloc(sizeof(char *)); - ckp.btcdpass = ckzalloc(sizeof(char *)); - ckp.btcdnotify = ckzalloc(sizeof(bool)); - } - for (i = 0; i < ckp.btcds; i++) { - if (!ckp.btcdurl[i]) - ckp.btcdurl[i] = strdup("localhost:8332"); - if (!ckp.btcdauth[i]) - ckp.btcdauth[i] = strdup("user"); - if (!ckp.btcdpass[i]) - ckp.btcdpass[i] = strdup("pass"); - } - - ckp.donaddress = "bc1q28kkr5hk4gnqe3evma6runjrd2pvqyp8fpwfzu"; - - /* Donations on testnet are meaningless but required for complete - * testing. 
Testnet and regtest addresses */ - ckp.tndonaddress = "tb1q5fyv7tue73y4zxezh2c685qpwx0cfngfxlrgxh"; - ckp.rtdonaddress = "bcrt1qlk935ze2fsu86zjp395uvtegztrkaezawxx0wf"; - - if (!ckp.btcaddress && !ckp.btcsolo && !ckp.proxy) - quit(0, "Non solo mining must have a btcaddress in config, aborting!"); - if (!ckp.blockpoll) - ckp.blockpoll = 100; - if (!ckp.nonce1length) - ckp.nonce1length = 4; - else if (ckp.nonce1length < 2 || ckp.nonce1length > 8) - quit(0, "Invalid nonce1length %d specified, must be 2~8", ckp.nonce1length); - if (!ckp.nonce2length) { - /* nonce2length is zero by default in proxy mode */ - if (!ckp.proxy) - ckp.nonce2length = 8; - } else if (ckp.nonce2length < 2 || ckp.nonce2length > 8) - quit(0, "Invalid nonce2length %d specified, must be 2~8", ckp.nonce2length); - if (!ckp.update_interval) - ckp.update_interval = 30; - if (ckp.mindiff == 0.0) - ckp.mindiff = 1.0; - if (ckp.startdiff == 0.0) - ckp.startdiff = 42.0; - if (ckp.highdiff == 0.0) - ckp.highdiff = 1000000.0; - if (!ckp.logdir) - ckp.logdir = strdup("logs"); - if (!ckp.serverurls) - ckp.serverurl = ckzalloc(sizeof(char *)); - if (ckp.proxy && !ckp.proxies) - quit(0, "No proxy entries found in config file %s", ckp.config); - if (ckp.redirector && !ckp.redirecturls) - quit(0, "No redirect entries found in config file %s", ckp.config); - if (!ckp.zmqblock) - ckp.zmqblock = "tcp://127.0.0.1:28332"; - - /* Create the log directory */ - trail_slash(&ckp.logdir); - ret = mkdir(ckp.logdir, 0750); - if (ret && errno != EEXIST) - quit(1, "Failed to make log directory %s", ckp.logdir); - - /* Create the user logdir */ - sprintf(buf, "%s/users", ckp.logdir); - ret = mkdir(buf, 0750); - if (ret && errno != EEXIST) - quit(1, "Failed to make user log directory %s", buf); - - /* Create the pool logdir */ - sprintf(buf, "%s/pool", ckp.logdir); - ret = mkdir(buf, 0750); - if (ret && errno != EEXIST) - quit(1, "Failed to make pool log directory %s", buf); - - /* Create the logfile */ - 
ASPRINTF(&ckp.logfilename, "%s%s.log", ckp.logdir, ckp.name); - if (!open_logfile(&ckp)) - quit(1, "Failed to make open log file %s", buf); - launch_logger(&ckp); - - ckp.main.ckp = &ckp; - ckp.main.processname = strdup("main"); - ckp.main.sockname = strdup("listener"); - name_process_sockname(&ckp.main.us, &ckp.main); - ckp.oldconnfd = ckzalloc(sizeof(int *) * ckp.serverurls); - manage_old_instance(&ckp, &ckp.main); - if (ckp.handover) { - const char *path = ckp.main.us.path; - - if (send_recv_path(path, "ping")) { - for (i = 0; i < ckp.serverurls; i++) { - char oldurl[INET6_ADDRSTRLEN], oldport[8]; - char getfd[16]; - int sockd; - - snprintf(getfd, 15, "getxfd%d", i); - sockd = open_unix_client(path); - if (sockd < 1) - break; - if (!send_unix_msg(sockd, getfd)) - break; - ckp.oldconnfd[i] = get_fd(sockd); - Close(sockd); - sockd = ckp.oldconnfd[i]; - if (!sockd) - break; - if (url_from_socket(sockd, oldurl, oldport)) { - LOGWARNING("Inherited old server socket %d url %s:%s !", - i, oldurl, oldport); - } else { - LOGWARNING("Inherited old server socket %d with new file descriptor %d!", - i, ckp.oldconnfd[i]); - } - } - send_recv_path(path, "reject"); - send_recv_path(path, "reconnect"); - send_recv_path(path, "shutdown"); - } - } - - if (ckp.daemon) { - int fd; - - if (fork()) - exit(0); - setsid(); - fd = open("/dev/null",O_RDWR, 0); - if (fd != -1) { - dup2(fd, STDIN_FILENO); - dup2(fd, STDOUT_FILENO); - dup2(fd, STDERR_FILENO); - } - } - - write_namepid(&ckp.main); - open_process_sock(&ckp, &ckp.main, &ckp.main.us); - - ret = sysconf(_SC_OPEN_MAX); - if (ckp.maxclients > ret * 9 / 10) { - LOGWARNING("Cannot set maxclients to %d due to max open file limit of %d, reducing to %d", - ckp.maxclients, ret, ret * 9 / 10); - ckp.maxclients = ret * 9 / 10; - } else if (!ckp.maxclients) { - LOGNOTICE("Setting maxclients to %d due to max open file limit of %d", - ret * 9 / 10, ret); - ckp.maxclients = ret * 9 / 10; - } - - // ckp.ckpapi = create_ckmsgq(&ckp, "api", 
&ckpool_api); - create_pthread(&ckp.pth_listener, listener, &ckp.main); - - handler.sa_handler = &sighandler; - handler.sa_flags = 0; - sigemptyset(&handler.sa_mask); - sigaction(SIGTERM, &handler, NULL); - sigaction(SIGINT, &handler, NULL); - - /* Launch separate processes from here */ - prepare_child(&ckp, &ckp.generator, generator, "generator"); - prepare_child(&ckp, &ckp.stratifier, stratifier, "stratifier"); - prepare_child(&ckp, &ckp.connector, connector, "connector"); - - /* Shutdown from here if the listener is sent a shutdown message */ - if (ckp.pth_listener) - join_pthread(ckp.pth_listener); - - clean_up(&ckp); - - return 0; -} diff --git a/solo-ckpool-source/src/ckpool.h b/solo-ckpool-source/src/ckpool.h deleted file mode 100644 index 6ee68e8..0000000 --- a/solo-ckpool-source/src/ckpool.h +++ /dev/null @@ -1,404 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. 
- */ - -#ifndef CKPOOL_H -#define CKPOOL_H - -#include "config.h" - -#include -#include -#include - -#include "libckpool.h" -#include "uthash.h" - -#define RPC_TIMEOUT 60 - -struct ckpool_instance; -typedef struct ckpool_instance ckpool_t; - -struct ckmsg { - struct ckmsg *next; - struct ckmsg *prev; - void *data; -}; - -typedef struct ckmsg ckmsg_t; - -typedef struct unix_msg unix_msg_t; - -struct unix_msg { - unix_msg_t *next; - unix_msg_t *prev; - int sockd; - char *buf; -}; - -struct ckmsgq { - ckpool_t *ckp; - char name[16]; - pthread_t pth; - mutex_t *lock; - pthread_cond_t *cond; - ckmsg_t *msgs; - void (*func)(ckpool_t *, void *); - int64_t messages; - bool active; -}; - -typedef struct ckmsgq ckmsgq_t; - -typedef struct proc_instance proc_instance_t; - -struct proc_instance { - ckpool_t *ckp; - unixsock_t us; - char *processname; - char *sockname; - int pid; - int oldpid; - pthread_t pth_process; - - /* Linked list of received messages, locking and conditional */ - unix_msg_t *unix_msgs; - mutex_t rmsg_lock; - pthread_cond_t rmsg_cond; -}; - -struct connsock { - int fd; - char *url; - char *port; - char *auth; - - char *buf; - int bufofs; - int buflen; - int bufsize; - int rcvbufsiz; - int sendbufsiz; - - ckpool_t *ckp; - /* Semaphore used to serialise request/responses */ - sem_t sem; - - bool alive; -}; - -typedef struct connsock connsock_t; - -typedef struct char_entry char_entry_t; - -struct char_entry { - char_entry_t *next; - char_entry_t *prev; - char *buf; -}; - -typedef struct log_entry log_entry_t; - -struct log_entry { - log_entry_t *next; - log_entry_t *prev; - char *fname; - char *buf; -}; - -struct server_instance { - /* Hash table data */ - UT_hash_handle hh; - int id; - - char *url; - char *auth; - char *pass; - bool notify; - bool alive; - connsock_t cs; -}; - -typedef struct server_instance server_instance_t; - -struct ckpool_instance { - /* Start time */ - time_t starttime; - /* Start pid */ - pid_t startpid; - /* The initial command 
line arguments */ - char **initial_args; - /* Number of arguments */ - int args; - /* Filename of config file */ - char *config; - /* Kill old instance with same name */ - bool killold; - /* Whether to log shares or not */ - bool logshares; - /* Logging level */ - int loglevel; - /* Main process name */ - char *name; - /* Directory where sockets are created */ - char *socket_dir; - /* Group ID for unix sockets */ - char *grpnam; - gid_t gr_gid; - /* Directory where logs are written */ - char *logdir; - /* Logfile */ - char *logfilename; - FILE *logfp; - int logfd; - time_t lastopen_t; - /* Connector fds if we inherit them from a running process */ - int *oldconnfd; - /* Should we inherit a running instance's socket and shut it down */ - bool handover; - /* How many clients maximum to accept before rejecting further */ - int maxclients; - - /* API message queue */ - ckmsgq_t *ckpapi; - - /* Logger message queue */ - ckmsgq_t *logger; - ckmsgq_t *console_logger; - - /* Process instance data of parent/child processes */ - proc_instance_t main; - - proc_instance_t generator; - proc_instance_t stratifier; - proc_instance_t connector; - - bool generator_ready; - bool stratifier_ready; - bool connector_ready; - - /* Name of protocol used for ZMQ block notifications */ - char *zmqblock; - - /* Threads of main process */ - pthread_t pth_listener; - pthread_t pth_watchdog; - - /* Are we running in trusted remote node mode */ - bool remote; - - /* Are we running in node proxy mode */ - bool node; - - /* Are we running in passthrough mode */ - bool passthrough; - - /* Are we a redirecting passthrough */ - bool redirector; - - /* Are we running as a proxy */ - bool proxy; - - /* Are we running in btcsolo mode */ - bool btcsolo; - - /* Are we running in userproxy mode */ - bool userproxy; - - /* Should we daemonise the ckpool process */ - bool daemon; - - /* Should we disable the throbber */ - bool quiet; - - /* Have we given warnings about the inability to raise buf sizes */ - 
bool wmem_warn; - bool rmem_warn; - - /* Bitcoind data */ - int btcds; - char **btcdurl; - char **btcdauth; - char **btcdpass; - bool *btcdnotify; - int blockpoll; // How frequently in ms to poll bitcoind for block updates - int nonce1length; // Extranonce1 length - int nonce2length; // Extranonce2 length - - /* Difficulty settings */ - double mindiff; // Default 1.0 (supports fractional values) - double startdiff; // Default 42.0 (supports fractional values) - double highdiff; // Default 1000000.0 (supports fractional values) - double maxdiff; // No default (supports fractional values) - - /* Coinbase data */ - char *btcaddress; // Address to mine to - bool script; // Address is a script address - bool segwit; // Address is a segwit address - char *btcsig; // Optional signature to add to coinbase - bool coinbase_valid; // Coinbase transaction confirmed valid - - /* Donation data */ - char *donaddress; // Donation address - char *tndonaddress; // Testnet donation address - char *rtdonaddress; // Regtest donation address - bool donscript; // Donation is a script - bool donsegwit; // Donation is segwit - bool donvalid; // Donation address works on this network - double donation; // Percentage donation to development - - /* Stratum options */ - server_instance_t **servers; - char **serverurl; // Array of URLs to bind our server/proxy to - int serverurls; // Number of server bindings - bool *server_highdiff; // If this server is highdiff - bool *nodeserver; // If this server URL serves node information - int nodeservers; // If this server has remote node servers - bool *trusted; // If this server URL accepts trusted remote nodes - char *upstream; // Upstream pool in trusted remote mode - - int update_interval; // Seconds between stratum updates - - uint32_t version_mask; // Bits which set to true means allow miner to modify those bits - - /* Proxy options */ - int proxies; - char **proxyurl; - char **proxyauth; - char **proxypass; - - /* Passthrough redirect options */ 
- int redirecturls; - char **redirecturl; - char **redirectport; - - /* Private data for each process */ - void *gdata; - void *sdata; - void *cdata; -}; - -enum stratum_msgtype { - SM_RECONNECT = 0, - SM_DIFF, - SM_MSG, - SM_UPDATE, - SM_ERROR, - SM_SUBSCRIBE, - SM_SUBSCRIBERESULT, - SM_SHARE, - SM_SHARERESULT, - SM_AUTH, - SM_AUTHRESULT, - SM_TXNS, - SM_TXNSRESULT, - SM_PING, - SM_WORKINFO, - SM_SUGGESTDIFF, - SM_BLOCK, - SM_PONG, - SM_TRANSACTIONS, - SM_SHAREERR, - SM_WORKERSTATS, - SM_REQTXNS, - SM_CONFIGURE, - SM_NONE -}; - -static const char __maybe_unused *stratum_msgs[] = { - "reconnect", - "diff", - "message", - "update", - "error", - "subscribe", - "subscribe.result", - "share", - "share.result", - "auth", - "auth.result", - "txns", - "txns.result", - "ping", - "workinfo", - "suggestdiff", - "block", - "pong", - "transactions", - "shareerr", - "workerstats", - "reqtxns", - "mining.configure", - "" -}; - -#define SAFE_HASH_OVERHEAD(HASHLIST) (HASHLIST ? HASH_OVERHEAD(hh, HASHLIST) : 0) - -void get_timestamp(char *stamp); - -ckmsgq_t *create_ckmsgq(ckpool_t *ckp, const char *name, const void *func); -ckmsgq_t *create_ckmsgqs(ckpool_t *ckp, const char *name, const void *func, const int count); -bool _ckmsgq_add(ckmsgq_t *ckmsgq, void *data, const char *file, const char *func, const int line); -#define ckmsgq_add(ckmsgq, data) _ckmsgq_add(ckmsgq, data, __FILE__, __func__, __LINE__) -bool ckmsgq_empty(ckmsgq_t *ckmsgq); -unix_msg_t *get_unix_msg(proc_instance_t *pi); - -bool ping_main(ckpool_t *ckp); -void empty_buffer(connsock_t *cs); -int set_sendbufsize(ckpool_t *ckp, const int fd, const int len); -int set_recvbufsize(ckpool_t *ckp, const int fd, const int len); -int read_socket_line(connsock_t *cs, float *timeout); -void _queue_proc(proc_instance_t *pi, const char *msg, const char *file, const char *func, const int line); -#define send_proc(pi, msg) _queue_proc(&(pi), msg, __FILE__, __func__, __LINE__) -char *_send_recv_proc(const proc_instance_t *pi, 
const char *msg, int writetimeout, int readtimedout, - const char *file, const char *func, const int line); -#define send_recv_proc(pi, msg) _send_recv_proc(&(pi), msg, UNIX_WRITE_TIMEOUT, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__) -char *_send_recv_ckdb(const ckpool_t *ckp, const char *msg, const char *file, const char *func, const int line); -#define send_recv_ckdb(ckp, msg) _send_recv_ckdb(ckp, msg, __FILE__, __func__, __LINE__) -char *_ckdb_msg_call(const ckpool_t *ckp, const char *msg, const char *file, const char *func, - const int line); -#define ckdb_msg_call(ckp, msg) _ckdb_msg_call(ckp, msg, __FILE__, __func__, __LINE__) - -json_t *json_rpc_call(connsock_t *cs, const char *rpc_req); -json_t *json_rpc_response(connsock_t *cs, const char *rpc_req); -void json_rpc_msg(connsock_t *cs, const char *rpc_req); -bool _send_json_msg(connsock_t *cs, const json_t *json_msg, const char *file, const char *func, const int line); -#define send_json_msg(CS, JSON_MSG) _send_json_msg(CS, JSON_MSG, __FILE__, __func__, __LINE__) -json_t *json_msg_result(const char *msg, json_t **res_val, json_t **err_val); - -bool json_get_string(char **store, const json_t *val, const char *res); -bool json_get_int64(int64_t *store, const json_t *val, const char *res); -bool json_get_int(int *store, const json_t *val, const char *res); -bool json_get_double(double *store, const json_t *val, const char *res); -bool json_get_uint32(uint32_t *store, const json_t *val, const char *res); -bool json_get_bool(bool *store, const json_t *val, const char *res); -bool json_getdel_int(int *store, json_t *val, const char *res); -bool json_getdel_int64(int64_t *store, json_t *val, const char *res); - - -/* API Placeholders for future API implementation */ -typedef struct apimsg apimsg_t; - -struct apimsg { - char *buf; - int sockd; -}; - -static inline void ckpool_api(ckpool_t __maybe_unused *ckp, apimsg_t __maybe_unused *apimsg) {}; -static inline json_t *json_encode_errormsg(json_error_t 
__maybe_unused *err_val) { return NULL; }; -static inline json_t *json_errormsg(const char __maybe_unused *fmt, ...) { return NULL; }; -static inline void send_api_response(json_t __maybe_unused *val, const int __maybe_unused sockd) {}; - -/* Subclients have client_ids in the high bits. Returns the value of the parent - * client if one exists. */ -static inline int64_t subclient(const int64_t client_id) -{ - return (client_id >> 32); -} - -#endif /* CKPOOL_H */ diff --git a/solo-ckpool-source/src/connector.c b/solo-ckpool-source/src/connector.c deleted file mode 100644 index 2b6ea4e..0000000 --- a/solo-ckpool-source/src/connector.c +++ /dev/null @@ -1,1667 +0,0 @@ -/* - * Copyright 2014-2017 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include -#include -#include -#include -#include -#include - -#include "ckpool.h" -#include "libckpool.h" -#include "uthash.h" -#include "utlist.h" -#include "stratifier.h" -#include "generator.h" - -#define MAX_MSGSIZE 1024 - -typedef struct client_instance client_instance_t; -typedef struct sender_send sender_send_t; -typedef struct share share_t; -typedef struct redirect redirect_t; - -struct client_instance { - /* For clients hashtable */ - UT_hash_handle hh; - int64_t id; - - /* fd cannot be changed while a ref is held */ - int fd; - - /* Reference count for when this instance is used outside of the - * connector_data lock */ - int ref; - - /* Have we disabled this client to be removed when there are no refs? 
*/ - bool invalid; - - /* For dead_clients list */ - client_instance_t *dead_next; - client_instance_t *dead_prev; - - client_instance_t *recycled_next; - client_instance_t *recycled_prev; - - - struct sockaddr_storage address_storage; - struct sockaddr *address; - char address_name[INET6_ADDRSTRLEN]; - - /* Which serverurl is this instance connected to */ - int server; - - char *buf; - unsigned long bufofs; - - /* Are we currently sending a blocked message from this client */ - sender_send_t *sending; - - /* Is this a trusted remote server */ - bool remote; - - /* Is this the parent passthrough client */ - bool passthrough; - - /* Linked list of shares in redirector mode.*/ - share_t *shares; - - /* Has this client already been told to redirect */ - bool redirected; - /* Has this client been authorised in redirector mode */ - bool authorised; - - /* Time this client started blocking, 0 when not blocked */ - time_t blocked_time; - - /* The size of the socket send buffer */ - int sendbufsize; -}; - -struct sender_send { - struct sender_send *next; - struct sender_send *prev; - - client_instance_t *client; - char *buf; - int len; - int ofs; -}; - -struct share { - share_t *next; - share_t *prev; - - time_t submitted; - int64_t id; -}; - -struct redirect { - UT_hash_handle hh; - char address_name[INET6_ADDRSTRLEN]; - int id; - int redirect_no; -}; - -/* Private data for the connector */ -struct connector_data { - ckpool_t *ckp; - cklock_t lock; - proc_instance_t *pi; - - time_t start_time; - - /* Array of server fds */ - int *serverfd; - /* All time count of clients connected */ - int nfds; - /* The epoll fd */ - int epfd; - - bool accept; - pthread_t pth_sender; - pthread_t pth_receiver; - - /* For the hashtable of all clients */ - client_instance_t *clients; - /* Linked list of dead clients no longer in use but may still have references */ - client_instance_t *dead_clients; - /* Linked list of client structures we can reuse */ - client_instance_t *recycled_clients; 
- - int clients_generated; - int dead_generated; - - int64_t client_ids; - - /* client message process queue */ - ckmsgq_t *cmpq; - - /* client message event process queue */ - ckmsgq_t *cevents; - - /* For the linked list of pending sends */ - sender_send_t *sender_sends; - - int64_t sends_generated; - int64_t sends_delayed; - int64_t sends_queued; - int64_t sends_size; - - /* For protecting the pending sends list */ - mutex_t sender_lock; - pthread_cond_t sender_cond; - - /* Hash list of all redirected IP address in redirector mode */ - redirect_t *redirects; - /* What redirect we're currently up to */ - int redirect; - - /* Pending sends to the upstream server */ - ckmsgq_t *upstream_sends; - connsock_t upstream_cs; - - /* Have we given the warning about inability to raise sendbuf size */ - bool wmem_warn; -}; - -typedef struct connector_data cdata_t; - -void connector_upstream_msg(ckpool_t *ckp, char *msg) -{ - cdata_t *cdata = ckp->cdata; - - LOGDEBUG("Upstreaming %s", msg); - ckmsgq_add(cdata->upstream_sends, msg); -} - -/* Increase the reference count of instance */ -static void __inc_instance_ref(client_instance_t *client) -{ - client->ref++; -} - -static void inc_instance_ref(cdata_t *cdata, client_instance_t *client) -{ - ck_wlock(&cdata->lock); - __inc_instance_ref(client); - ck_wunlock(&cdata->lock); -} - -/* Increase the reference count of instance */ -static void __dec_instance_ref(client_instance_t *client) -{ - client->ref--; -} - -static void dec_instance_ref(cdata_t *cdata, client_instance_t *client) -{ - ck_wlock(&cdata->lock); - __dec_instance_ref(client); - ck_wunlock(&cdata->lock); -} - -/* Recruit a client structure from a recycled one if available, creating a - * new structure only if we have none to reuse. 
*/ -static client_instance_t *recruit_client(cdata_t *cdata) -{ - client_instance_t *client = NULL; - - ck_wlock(&cdata->lock); - if (cdata->recycled_clients) { - client = cdata->recycled_clients; - DL_DELETE2(cdata->recycled_clients, client, recycled_prev, recycled_next); - } else - cdata->clients_generated++; - ck_wunlock(&cdata->lock); - - if (!client) { - LOGDEBUG("Connector created new client instance"); - client = ckzalloc(sizeof(client_instance_t)); - } else - LOGDEBUG("Connector recycled client instance"); - - client->buf = ckzalloc(PAGESIZE); - - return client; -} - -static void __recycle_client(cdata_t *cdata, client_instance_t *client) -{ - dealloc(client->buf); - memset(client, 0, sizeof(client_instance_t)); - client->id = -1; - DL_APPEND2(cdata->recycled_clients, client, recycled_prev, recycled_next); -} - -static void recycle_client(cdata_t *cdata, client_instance_t *client) -{ - ck_wlock(&cdata->lock); - __recycle_client(cdata, client); - ck_wunlock(&cdata->lock); -} - -/* Allows the stratifier to get a unique local virtualid for subclients */ -int64_t connector_newclientid(ckpool_t *ckp) -{ - int64_t ret; - - cdata_t *cdata = ckp->cdata; - - ck_wlock(&cdata->lock); - ret = cdata->client_ids++; - ck_wunlock(&cdata->lock); - - return ret; -} - -/* Accepts incoming connections on the server socket and generates client - * instances */ -static int accept_client(cdata_t *cdata, const int epfd, const uint64_t server) -{ - int fd, port, no_clients, sockd; - ckpool_t *ckp = cdata->ckp; - client_instance_t *client; - struct epoll_event event; - socklen_t address_len; - socklen_t optlen; - - ck_rlock(&cdata->lock); - no_clients = HASH_COUNT(cdata->clients); - ck_runlock(&cdata->lock); - - if (unlikely(ckp->maxclients && no_clients >= ckp->maxclients)) { - LOGWARNING("Server full with %d clients", no_clients); - return 0; - } - - sockd = cdata->serverfd[server]; - client = recruit_client(cdata); - client->server = server; - client->address = (struct sockaddr 
*)&client->address_storage; - address_len = sizeof(client->address_storage); - fd = accept(sockd, client->address, &address_len); - if (unlikely(fd < 0)) { - /* Handle these errors gracefully should we ever share this - * socket */ - if (errno == EAGAIN || errno == EWOULDBLOCK || errno == ECONNABORTED) { - LOGERR("Recoverable error on accept in accept_client"); - return 0; - } - LOGERR("Failed to accept on socket %d in acceptor", sockd); - recycle_client(cdata, client); - return -1; - } - - switch (client->address->sa_family) { - const struct sockaddr_in *inet4_in; - const struct sockaddr_in6 *inet6_in; - - case AF_INET: - inet4_in = (struct sockaddr_in *)client->address; - inet_ntop(AF_INET, &inet4_in->sin_addr, client->address_name, INET6_ADDRSTRLEN); - port = htons(inet4_in->sin_port); - break; - case AF_INET6: - inet6_in = (struct sockaddr_in6 *)client->address; - inet_ntop(AF_INET6, &inet6_in->sin6_addr, client->address_name, INET6_ADDRSTRLEN); - port = htons(inet6_in->sin6_port); - break; - default: - LOGWARNING("Unknown INET type for client %d on socket %d", - cdata->nfds, fd); - Close(fd); - recycle_client(cdata, client); - return 0; - } - - keep_sockalive(fd); - noblock_socket(fd); - - LOGINFO("Connected new client %d on socket %d to %d active clients from %s:%d", - cdata->nfds, fd, no_clients, client->address_name, port); - - ck_wlock(&cdata->lock); - client->id = cdata->client_ids++; - HASH_ADD_I64(cdata->clients, id, client); - cdata->nfds++; - ck_wunlock(&cdata->lock); - - /* We increase the ref count on this client as epoll creates a pointer - * to it. We drop that reference when the socket is closed which - * removes it automatically from the epoll list. 
*/ - __inc_instance_ref(client); - client->fd = fd; - optlen = sizeof(client->sendbufsize); - getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &client->sendbufsize, &optlen); - LOGDEBUG("Client sendbufsize detected as %d", client->sendbufsize); - - event.data.u64 = client->id; - event.events = EPOLLIN | EPOLLRDHUP | EPOLLONESHOT; - if (unlikely(epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &event) < 0)) { - LOGERR("Failed to epoll_ctl add in accept_client"); - dec_instance_ref(cdata, client); - return 0; - } - - return 1; -} - -static int __drop_client(cdata_t *cdata, client_instance_t *client) -{ - int ret = -1; - - if (client->invalid) - goto out; - client->invalid = true; - ret = client->fd; - /* Closing the fd will automatically remove it from the epoll list */ - Close(client->fd); - HASH_DEL(cdata->clients, client); - DL_APPEND2(cdata->dead_clients, client, dead_prev, dead_next); - /* This is the reference to this client's presence in the - * epoll list. */ - __dec_instance_ref(client); - cdata->dead_generated++; -out: - return ret; -} - -static void stratifier_drop_id(ckpool_t *ckp, const int64_t id) -{ - char buf[256]; - - sprintf(buf, "dropclient=%"PRId64, id); - send_proc(ckp->stratifier, buf); -} - -/* Client must hold a reference count */ -static int drop_client(cdata_t *cdata, client_instance_t *client) -{ - bool passthrough = client->passthrough, remote = client->remote; - char address_name[INET6_ADDRSTRLEN]; - int64_t client_id = client->id; - int fd = -1; - - strcpy(address_name, client->address_name); - ck_wlock(&cdata->lock); - fd = __drop_client(cdata, client); - ck_wunlock(&cdata->lock); - - if (fd > -1) { - if (passthrough) { - LOGNOTICE("Connector dropped passthrough %"PRId64" %s", - client_id, address_name); - } else if (remote) { - LOGWARNING("Remote trusted server client %"PRId64" %s disconnected", - client_id, address_name); - } - LOGDEBUG("Connector dropped fd %d", fd); - stratifier_drop_id(cdata->ckp, client_id); - } - - return fd; -} - -/* For sending the 
drop command to the upstream pool in passthrough mode */ -static void generator_drop_client(ckpool_t *ckp, const client_instance_t *client) -{ - json_t *val; - - JSON_CPACK(val, "{si,sI:ss:si:ss:s[]}", "id", 42, "client_id", client->id, "address", - client->address_name, "server", client->server, "method", "mining.term", - "params"); - generator_add_send(ckp, val); -} - -static void stratifier_drop_client(ckpool_t *ckp, const client_instance_t *client) -{ - stratifier_drop_id(ckp, client->id); -} - -/* Invalidate this instance. Remove them from the hashtables we look up - * regularly but keep the instances in a linked list until their ref count - * drops to zero when we can remove them lazily. Client must hold a reference - * count. */ -static int invalidate_client(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) -{ - client_instance_t *tmp; - int ret; - - ret = drop_client(cdata, client); - if ((!ckp->passthrough || ckp->node) && !client->passthrough) - stratifier_drop_client(ckp, client); - if (ckp->passthrough) - generator_drop_client(ckp, client); - - /* Cull old unused clients lazily when there are no more reference - * counts for them. */ - ck_wlock(&cdata->lock); - DL_FOREACH_SAFE2(cdata->dead_clients, client, tmp, dead_next) { - if (!client->ref) { - DL_DELETE2(cdata->dead_clients, client, dead_prev, dead_next); - LOGINFO("Connector recycling client %"PRId64, client->id); - /* We only close the client fd once we're sure there - * are no references to it left to prevent fds being - * reused on new and old clients. 
*/ - nolinger_socket(client->fd); - Close(client->fd); - __recycle_client(cdata, client); - } - } - ck_wunlock(&cdata->lock); - - return ret; -} - -static void drop_all_clients(cdata_t *cdata) -{ - client_instance_t *client, *tmp; - - ck_wlock(&cdata->lock); - HASH_ITER(hh, cdata->clients, client, tmp) { - __drop_client(cdata, client); - } - ck_wunlock(&cdata->lock); -} - -static void send_client(ckpool_t *ckp, cdata_t *cdata, int64_t id, char *buf); - -/* Look for shares being submitted via a redirector and add them to a linked - * list for looking up the responses. */ -static void parse_redirector_share(cdata_t *cdata, client_instance_t *client, const json_t *val) -{ - share_t *share, *tmp; - time_t now; - int64_t id; - - if (!json_get_int64(&id, val, "id")) { - LOGNOTICE("Failed to find redirector share id"); - return; - } - share = ckzalloc(sizeof(share_t)); - now = time(NULL); - share->submitted = now; - share->id = id; - - LOGINFO("Redirector adding client %"PRId64" share id: %"PRId64, client->id, id); - - /* We use the cdata lock instead of a separate lock since this function - * is called infrequently. */ - ck_wlock(&cdata->lock); - DL_APPEND(client->shares, share); - - /* Age old shares. */ - DL_FOREACH_SAFE(client->shares, share, tmp) { - if (now > share->submitted + 120) { - DL_DELETE(client->shares, share); - dealloc(share); - } - } - ck_wunlock(&cdata->lock); -} - -/* Client is holding a reference count from being on the epoll list. Returns - * true if we will still be receiving messages from this client. 
*/ -static bool parse_client_msg(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) -{ - int buflen, ret; - json_t *val; - char *eol; - -retry: - if (unlikely(client->bufofs > MAX_MSGSIZE)) { - if (!client->remote) { - LOGNOTICE("Client id %"PRId64" fd %d overloaded buffer without EOL, disconnecting", - client->id, client->fd); - return false; - } - client->buf = realloc(client->buf, round_up_page(client->bufofs + MAX_MSGSIZE + 1)); - } - /* This read call is non-blocking since the socket is set to O_NOBLOCK */ - ret = read(client->fd, client->buf + client->bufofs, MAX_MSGSIZE); - if (ret < 1) { - if (likely(errno == EAGAIN || errno == EWOULDBLOCK || !ret)) - return true; - LOGINFO("Client id %"PRId64" fd %d disconnected - recv fail with bufofs %lu ret %d errno %d %s", - client->id, client->fd, client->bufofs, ret, errno, ret && errno ? strerror(errno) : ""); - return false; - } - client->bufofs += ret; -reparse: - eol = memchr(client->buf, '\n', client->bufofs); - if (!eol) - goto retry; - - /* Do something useful with this message now */ - buflen = eol - client->buf + 1; - if (unlikely(buflen > MAX_MSGSIZE && !client->remote)) { - LOGNOTICE("Client id %"PRId64" fd %d message oversize, disconnecting", client->id, client->fd); - return false; - } - - if (!(val = json_loads(client->buf, JSON_DISABLE_EOF_CHECK, NULL))) { - char *buf = strdup("Invalid JSON, disconnecting\n"); - - LOGINFO("Client id %"PRId64" sent invalid json message %s", client->id, client->buf); - send_client(ckp, cdata, client->id, buf); - return false; - } else { - if (client->passthrough) { - int64_t passthrough_id; - - json_getdel_int64(&passthrough_id, val, "client_id"); - passthrough_id = (client->id << 32) | passthrough_id; - json_object_set_new_nocheck(val, "client_id", json_integer(passthrough_id)); - } else { - if (ckp->redirector && !client->redirected && strstr(client->buf, "mining.submit")) - parse_redirector_share(cdata, client, val); - json_object_set_new_nocheck(val, 
"client_id", json_integer(client->id)); - json_object_set_new_nocheck(val, "address", json_string(client->address_name)); - } - json_object_set_new_nocheck(val, "server", json_integer(client->server)); - - /* Do not send messages of clients we've already dropped. We - * do this unlocked as the occasional false negative can be - * filtered by the stratifier. */ - if (likely(!client->invalid)) { - if (!ckp->passthrough) - stratifier_add_recv(ckp, val); - if (ckp->node) - stratifier_add_recv(ckp, json_deep_copy(val)); - if (ckp->passthrough) - generator_add_send(ckp, val); - } else - json_decref(val); - } - client->bufofs -= buflen; - if (client->bufofs) - memmove(client->buf, client->buf + buflen, client->bufofs); - client->buf[client->bufofs] = '\0'; - - if (client->bufofs) - goto reparse; - goto retry; -} - -static client_instance_t *ref_client_by_id(cdata_t *cdata, int64_t id) -{ - client_instance_t *client; - - ck_wlock(&cdata->lock); - HASH_FIND_I64(cdata->clients, &id, client); - if (client) { - if (!client->invalid) - __inc_instance_ref(client); - else - client = NULL; - } - ck_wunlock(&cdata->lock); - - return client; -} - -static void redirect_client(ckpool_t *ckp, client_instance_t *client); - -static bool redirect_matches(cdata_t *cdata, client_instance_t *client) -{ - redirect_t *redirect; - - ck_rlock(&cdata->lock); - HASH_FIND_STR(cdata->redirects, client->address_name, redirect); - ck_runlock(&cdata->lock); - - return redirect; -} - -static void client_event_processor(ckpool_t *ckp, struct epoll_event *event) -{ - const uint32_t events = event->events; - const uint64_t id = event->data.u64; - cdata_t *cdata = ckp->cdata; - client_instance_t *client; - - client = ref_client_by_id(cdata, id); - if (unlikely(!client)) { - LOGNOTICE("Failed to find client by id %"PRId64" in receiver!", id); - goto outnoclient; - } - /* We can have both messages and read hang ups so process the - * message first. 
*/ - if (likely(events & EPOLLIN)) { - /* Rearm the client for epoll events if we have successfully - * parsed a message from it */ - if (unlikely(!parse_client_msg(ckp, cdata, client))) { - invalidate_client(ckp, cdata, client); - goto out; - } - } - if (unlikely(events & EPOLLERR)) { - socklen_t errlen = sizeof(int); - int error = 0; - - /* See what type of error this is and raise the log - * level of the message if it's unexpected. */ - getsockopt(client->fd, SOL_SOCKET, SO_ERROR, (void *)&error, &errlen); - if (error != 104) { - LOGNOTICE("Client id %"PRId64" fd %d epollerr HUP in epoll with errno %d: %s", - client->id, client->fd, error, strerror(error)); - } else { - LOGINFO("Client id %"PRId64" fd %d epollerr HUP in epoll with errno %d: %s", - client->id, client->fd, error, strerror(error)); - } - invalidate_client(cdata->pi->ckp, cdata, client); - } else if (unlikely(events & EPOLLHUP)) { - /* Client connection reset by peer */ - LOGINFO("Client id %"PRId64" fd %d HUP in epoll", client->id, client->fd); - invalidate_client(cdata->pi->ckp, cdata, client); - } else if (unlikely(events & EPOLLRDHUP)) { - /* Client disconnected by peer */ - LOGINFO("Client id %"PRId64" fd %d RDHUP in epoll", client->id, client->fd); - invalidate_client(cdata->pi->ckp, cdata, client); - } -out: - if (likely(!client->invalid)) { - /* Rearm the fd in the epoll list if it's still active */ - event->data.u64 = id; - event->events = EPOLLIN | EPOLLRDHUP | EPOLLONESHOT; - epoll_ctl(cdata->epfd, EPOLL_CTL_MOD, client->fd, event); - } - dec_instance_ref(cdata, client); -outnoclient: - free(event); -} - -/* Waits on fds ready to read on from the list stored in conn_instance and - * handles the incoming messages */ -static void *receiver(void *arg) -{ - cdata_t *cdata = (cdata_t *)arg; - struct epoll_event *event = ckzalloc(sizeof(struct epoll_event)); - ckpool_t *ckp = cdata->ckp; - uint64_t serverfds, i; - int ret, epfd; - - rename_proc("creceiver"); - - epfd = cdata->epfd = 
epoll_create1(EPOLL_CLOEXEC); - if (epfd < 0) { - LOGEMERG("FATAL: Failed to create epoll in receiver"); - goto out; - } - serverfds = ckp->serverurls; - /* Add all the serverfds to the epoll */ - for (i = 0; i < serverfds; i++) { - /* The small values will be less than the first client ids */ - event->data.u64 = i; - event->events = EPOLLIN | EPOLLRDHUP; - ret = epoll_ctl(epfd, EPOLL_CTL_ADD, cdata->serverfd[i], event); - if (ret < 0) { - LOGEMERG("FATAL: Failed to add epfd %d to epoll_ctl", epfd); - goto out; - } - } - - /* Wait for the stratifier to be ready for us */ - while (!ckp->stratifier_ready) - cksleep_ms(10); - - while (42) { - uint64_t edu64; - - while (unlikely(!cdata->accept)) - cksleep_ms(10); - ret = epoll_wait(epfd, event, 1, 1000); - if (unlikely(ret < 1)) { - if (unlikely(ret == -1)) { - LOGEMERG("FATAL: Failed to epoll_wait in receiver"); - break; - } - /* Nothing to service, still very unlikely */ - continue; - } - edu64 = event->data.u64; - if (edu64 < serverfds) { - ret = accept_client(cdata, epfd, edu64); - if (unlikely(ret < 0)) { - LOGEMERG("FATAL: Failed to accept_client in receiver"); - break; - } - continue; - } - /* Event structure is handed off to client_event_processor - * here to be freed so we need to allocate a new one */ - ckmsgq_add(cdata->cevents, event); - event = ckzalloc(sizeof(struct epoll_event)); - } -out: - /* We shouldn't get here unless there's an error */ - return NULL; -} - -/* Send a sender_send message and return true if we've finished sending it or - * are unable to send any more. 
*/ -static bool send_sender_send(ckpool_t *ckp, cdata_t *cdata, sender_send_t *sender_send) -{ - client_instance_t *client = sender_send->client; - time_t now_t; - - if (unlikely(client->invalid)) - goto out_true; - - /* Make sure we only send one message at a time to each client */ - if (unlikely(client->sending && client->sending != sender_send)) - return false; - - client->sending = sender_send; - now_t = time(NULL); - - /* Increase sendbufsize to match large messages sent to clients - this - * usually only applies to clients as mining nodes. */ - if (unlikely(!ckp->wmem_warn && sender_send->len > client->sendbufsize)) - client->sendbufsize = set_sendbufsize(ckp, client->fd, sender_send->len); - - while (sender_send->len) { - int ret = write(client->fd, sender_send->buf + sender_send->ofs, sender_send->len); - - if (ret < 1) { - /* Invalidate clients that block for more than 60 seconds */ - if (unlikely(client->blocked_time && now_t - client->blocked_time >= 60)) { - LOGNOTICE("Client id %"PRId64" fd %d blocked for >60 seconds, disconnecting", - client->id, client->fd); - invalidate_client(ckp, cdata, client); - goto out_true; - } - if (errno == EAGAIN || errno == EWOULDBLOCK || !ret) { - if (!client->blocked_time) - client->blocked_time = now_t; - return false; - } - LOGINFO("Client id %"PRId64" fd %d disconnected with write errno %d:%s", - client->id, client->fd, errno, strerror(errno)); - invalidate_client(ckp, cdata, client); - goto out_true; - } - sender_send->ofs += ret; - sender_send->len -= ret; - client->blocked_time = 0; - } -out_true: - client->sending = NULL; - return true; -} - -static void clear_sender_send(sender_send_t *sender_send, cdata_t *cdata) -{ - dec_instance_ref(cdata, sender_send->client); - free(sender_send->buf); - free(sender_send); -} - -/* Use a thread to send queued messages, appending them to the sends list and - * iterating over all of them, attempting to send them all non-blocking to - * only send to those clients ready to 
receive data. */ -static void *sender(void *arg) -{ - cdata_t *cdata = (cdata_t *)arg; - sender_send_t *sends = NULL; - ckpool_t *ckp = cdata->ckp; - - rename_proc("csender"); - - while (42) { - int64_t sends_queued = 0, sends_size = 0; - sender_send_t *sending, *tmp; - - /* Check all sends to see if they can be written out */ - DL_FOREACH_SAFE(sends, sending, tmp) { - if (send_sender_send(ckp, cdata, sending)) { - DL_DELETE(sends, sending); - clear_sender_send(sending, cdata); - } else { - sends_queued++; - sends_size += sizeof(sender_send_t) + sending->len + 1; - } - } - - mutex_lock(&cdata->sender_lock); - cdata->sends_delayed += sends_queued; - cdata->sends_queued = sends_queued; - cdata->sends_size = sends_size; - /* Poll every 10ms if there are no new sends. */ - if (!cdata->sender_sends) { - const ts_t polltime = {0, 10000000}; - ts_t timeout_ts; - - ts_realtime(&timeout_ts); - timeraddspec(&timeout_ts, &polltime); - cond_timedwait(&cdata->sender_cond, &cdata->sender_lock, &timeout_ts); - } - if (cdata->sender_sends) { - DL_CONCAT(sends, cdata->sender_sends); - cdata->sender_sends = NULL; - } - mutex_unlock(&cdata->sender_lock); - } - /* We shouldn't get here unless there's an error */ - return NULL; -} - -static int add_redirect(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) -{ - redirect_t *redirect; - bool found; - - ck_wlock(&cdata->lock); - HASH_FIND_STR(cdata->redirects, client->address_name, redirect); - if (!redirect) { - redirect = ckzalloc(sizeof(redirect_t)); - strcpy(redirect->address_name, client->address_name); - redirect->redirect_no = cdata->redirect++; - if (cdata->redirect >= ckp->redirecturls) - cdata->redirect = 0; - HASH_ADD_STR(cdata->redirects, address_name, redirect); - found = false; - } else - found = true; - ck_wunlock(&cdata->lock); - - LOGNOTICE("Redirecting client %"PRId64" from %s IP %s to redirecturl %d", - client->id, found ? 
"matching" : "new", client->address_name, redirect->redirect_no); - return redirect->redirect_no; -} - -static void redirect_client(ckpool_t *ckp, client_instance_t *client) -{ - sender_send_t *sender_send; - cdata_t *cdata = ckp->cdata; - json_t *val; - char *buf; - int num; - - /* Set the redirected boool to only try redirecting them once */ - client->redirected = true; - - num = add_redirect(ckp, cdata, client); - JSON_CPACK(val, "{sosss[ssi]}", "id", json_null(), "method", "client.reconnect", - "params", ckp->redirecturl[num], ckp->redirectport[num], 0); - buf = json_dumps(val, JSON_EOL | JSON_COMPACT); - json_decref(val); - - sender_send = ckzalloc(sizeof(sender_send_t)); - sender_send->client = client; - sender_send->buf = buf; - sender_send->len = strlen(buf); - inc_instance_ref(cdata, client); - - mutex_lock(&cdata->sender_lock); - cdata->sends_generated++; - DL_APPEND(cdata->sender_sends, sender_send); - pthread_cond_signal(&cdata->sender_cond); - mutex_unlock(&cdata->sender_lock); -} - -/* Look for accepted shares in redirector mode to know we can redirect this - * client to a protected server. 
*/ -static bool test_redirector_shares(cdata_t *cdata, client_instance_t *client, const char *buf) -{ - json_t *val = json_loads(buf, 0, NULL); - share_t *share, *found = NULL; - bool ret = false; - int64_t id; - - if (!val) { - /* Can happen when responding to invalid json from client */ - LOGINFO("Invalid json response to client %"PRId64 "%s", client->id, buf); - return ret; - } - if (!json_get_int64(&id, val, "id")) { - LOGINFO("Failed to find response id"); - goto out; - } - - ck_rlock(&cdata->lock); - DL_FOREACH(client->shares, share) { - if (share->id == id) { - LOGDEBUG("Found matching share %"PRId64" in trs for client %"PRId64, - id, client->id); - found = share; - break; - } - } - ck_runlock(&cdata->lock); - - if (found) { - bool result = false; - - if (!json_get_bool(&result, val, "result")) { - LOGINFO("Failed to find result in trs share"); - goto out; - } - if (!json_is_null(json_object_get(val, "error"))) { - LOGINFO("Got error for trs share"); - goto out; - } - if (!result) { - LOGDEBUG("Rejected trs share"); - goto out; - } - LOGNOTICE("Found accepted share for client %"PRId64" - redirecting", - client->id); - ret = true; - - /* Clear the list now since we don't need it any more */ - ck_wlock(&cdata->lock); - DL_FOREACH_SAFE(client->shares, share, found) { - DL_DELETE(client->shares, share); - dealloc(share); - } - ck_wunlock(&cdata->lock); - } -out: - json_decref(val); - return ret; -} - -/* Send a client by id a heap allocated buffer, allowing this function to - * free the ram. 
*/ -static void send_client(ckpool_t *ckp, cdata_t *cdata, const int64_t id, char *buf) -{ - sender_send_t *sender_send; - client_instance_t *client; - bool redirect = false; - int64_t pass_id; - int len; - - if (unlikely(!buf)) { - LOGWARNING("Connector send_client sent a null buffer"); - return; - } - len = strlen(buf); - if (unlikely(!len)) { - LOGWARNING("Connector send_client sent a zero length buffer"); - free(buf); - return; - } - - if (unlikely(ckp->node && !id)) { - LOGDEBUG("Message for node: %s", buf); - send_proc(ckp->stratifier, buf); - free(buf); - return; - } - - /* Grab a reference to this client until the sender_send has - * completed processing. Is this a passthrough subclient ? */ - if ((pass_id = subclient(id))) { - int64_t client_id = id & 0xffffffffll; - - /* Make sure the passthrough exists for passthrough subclients */ - client = ref_client_by_id(cdata, pass_id); - if (unlikely(!client)) { - LOGINFO("Connector failed to find passthrough id %"PRId64" of client id %"PRId64" to send to", - pass_id, client_id); - /* Now see if the subclient exists */ - client = ref_client_by_id(cdata, client_id); - if (client) { - invalidate_client(ckp, cdata, client); - dec_instance_ref(cdata, client); - } else - stratifier_drop_id(ckp, id); - free(buf); - return; - } - } else { - client = ref_client_by_id(cdata, id); - if (unlikely(!client)) { - LOGINFO("Connector failed to find client id %"PRId64" to send to", id); - stratifier_drop_id(ckp, id); - free(buf); - return; - } - if (ckp->redirector && !client->redirected && client->authorised) { - /* If clients match the IP of clients that have already - * been whitelisted as finding valid shares then - * redirect them immediately. 
*/ - if (redirect_matches(cdata, client)) - redirect = true; - else - redirect = test_redirector_shares(cdata, client, buf); - } - } - - sender_send = ckzalloc(sizeof(sender_send_t)); - sender_send->client = client; - sender_send->buf = buf; - sender_send->len = len; - - mutex_lock(&cdata->sender_lock); - cdata->sends_generated++; - DL_APPEND(cdata->sender_sends, sender_send); - pthread_cond_signal(&cdata->sender_cond); - mutex_unlock(&cdata->sender_lock); - - /* Redirect after sending response to shares and authorise */ - if (unlikely(redirect)) - redirect_client(ckp, client); -} - -static void send_client_json(ckpool_t *ckp, cdata_t *cdata, int64_t client_id, json_t *json_msg) -{ - client_instance_t *client; - char *msg; - - if (ckp->node && (client = ref_client_by_id(cdata, client_id))) { - json_t *val = json_deep_copy(json_msg); - - json_object_set_new_nocheck(val, "client_id", json_integer(client_id)); - json_object_set_new_nocheck(val, "address", json_string(client->address_name)); - json_object_set_new_nocheck(val, "server", json_integer(client->server)); - dec_instance_ref(cdata, client); - stratifier_add_recv(ckp, val); - } - if (ckp->passthrough && client_id) - json_object_del(json_msg, "node.method"); - - msg = json_dumps(json_msg, JSON_EOL | JSON_COMPACT); - send_client(ckp, cdata, client_id, msg); - json_decref(json_msg); -} - -/* When testing if a client exists, passthrough clients don't exist when their - * parent no longer exists. 
*/ -static bool client_exists(cdata_t *cdata, int64_t id) -{ - int64_t parent_id = subclient(id); - client_instance_t *client; - - if (parent_id) - id = parent_id; - - ck_rlock(&cdata->lock); - HASH_FIND_I64(cdata->clients, &id, client); - ck_runlock(&cdata->lock); - - return !!client; -} - -static void passthrough_client(ckpool_t *ckp, cdata_t *cdata, client_instance_t *client) -{ - json_t *val; - - LOGINFO("Connector adding passthrough client %"PRId64, client->id); - client->passthrough = true; - JSON_CPACK(val, "{sb}", "result", true); - send_client_json(ckp, cdata, client->id, val); - if (!ckp->rmem_warn) - set_recvbufsize(ckp, client->fd, 1048576); - if (!ckp->wmem_warn) - client->sendbufsize = set_sendbufsize(ckp, client->fd, 1048576); -} - -static bool connect_upstream(ckpool_t *ckp, connsock_t *cs) -{ - json_t *req, *val = NULL, *res_val, *err_val; - bool res, ret = false; - float timeout = 10; - - cksem_wait(&cs->sem); - cs->fd = connect_socket(cs->url, cs->port); - if (cs->fd < 0) { - LOGWARNING("Failed to connect to upstream server %s:%s", cs->url, cs->port); - goto out; - } - keep_sockalive(cs->fd); - - /* We want large send buffers for upstreaming messages */ - if (!ckp->rmem_warn) - set_recvbufsize(ckp, cs->fd, 2097152); - if (!ckp->wmem_warn) - cs->sendbufsiz = set_sendbufsize(ckp, cs->fd, 2097152); - - JSON_CPACK(req, "{ss,s[s]}", - "method", "mining.remote", - "params", PACKAGE"/"VERSION); - res = send_json_msg(cs, req); - json_decref(req); - if (!res) { - LOGWARNING("Failed to send message in connect_upstream"); - goto out; - } - if (read_socket_line(cs, &timeout) < 1) { - LOGWARNING("Failed to receive line in connect_upstream"); - goto out; - } - val = json_msg_result(cs->buf, &res_val, &err_val); - if (!val || !res_val) { - LOGWARNING("Failed to get a json result in connect_upstream, got: %s", - cs->buf); - goto out; - } - ret = json_is_true(res_val); - if (!ret) { - LOGWARNING("Denied upstream trusted connection"); - goto out; - } - 
LOGWARNING("Connected to upstream server %s:%s as trusted remote", - cs->url, cs->port); - ret = true; -out: - cksem_post(&cs->sem); - - return ret; -} - -static void usend_process(ckpool_t *ckp, char *buf) -{ - cdata_t *cdata = ckp->cdata; - connsock_t *cs = &cdata->upstream_cs; - int len, sent; - - if (unlikely(!buf || !strlen(buf))) { - LOGERR("Send empty message to usend_process"); - goto out; - } - LOGDEBUG("Sending upstream msg: %s", buf); - len = strlen(buf); - while (42) { - sent = write_socket(cs->fd, buf, len); - if (sent == len) - break; - if (cs->fd > 0) { - LOGWARNING("Upstream pool failed, attempting reconnect while caching messages"); - Close(cs->fd); - } - do - sleep(5); - while (!connect_upstream(ckp, cs)); - } -out: - free(buf); -} - -static void ping_upstream(cdata_t *cdata) -{ - char *buf; - - ASPRINTF(&buf, "{\"method\":\"ping\"}\n"); - ckmsgq_add(cdata->upstream_sends, buf); -} - -static void *urecv_process(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - cdata_t *cdata = ckp->cdata; - connsock_t *cs = &cdata->upstream_cs; - bool alive = true; - - rename_proc("ureceiver"); - - pthread_detach(pthread_self()); - - while (42) { - const char *method; - float timeout = 5; - json_t *val; - int ret; - - cksem_wait(&cs->sem); - ret = read_socket_line(cs, &timeout); - if (ret < 1) { - ping_upstream(cdata); - if (likely(!ret)) { - LOGDEBUG("No message from upstream pool"); - } else { - LOGNOTICE("Failed to read from upstream pool"); - alive = false; - } - goto nomsg; - } - alive = true; - val = json_loads(cs->buf, 0, NULL); - if (unlikely(!val)) { - LOGWARNING("Received non-json msg from upstream pool %s", - cs->buf); - goto nomsg; - } - method = json_string_value(json_object_get(val, "method")); - if (unlikely(!method)) { - LOGWARNING("Failed to find method from upstream pool json %s", - cs->buf); - json_decref(val); - goto decref; - } - if (!safecmp(method, stratum_msgs[SM_TRANSACTIONS])) - parse_upstream_txns(ckp, val); - else if (!safecmp(method, 
stratum_msgs[SM_AUTHRESULT])) - parse_upstream_auth(ckp, val); - else if (!safecmp(method, stratum_msgs[SM_WORKINFO])) - parse_upstream_workinfo(ckp, val); - else if (!safecmp(method, stratum_msgs[SM_BLOCK])) - parse_upstream_block(ckp, val); - else if (!safecmp(method, stratum_msgs[SM_REQTXNS])) - parse_upstream_reqtxns(ckp, val); - else if (!safecmp(method, "pong")) - LOGDEBUG("Received upstream pong"); - else - LOGWARNING("Unrecognised upstream method %s", method); -decref: - json_decref(val); -nomsg: - cksem_post(&cs->sem); - - if (!alive) - sleep(5); - } - return NULL; -} - -static bool setup_upstream(ckpool_t *ckp, cdata_t *cdata) -{ - connsock_t *cs = &cdata->upstream_cs; - bool ret = false; - pthread_t pth; - - cs->ckp = ckp; - if (!ckp->upstream) { - LOGEMERG("No upstream server set in remote trusted server mode"); - goto out; - } - if (!extract_sockaddr(ckp->upstream, &cs->url, &cs->port)) { - LOGEMERG("Failed to extract upstream address from %s", ckp->upstream); - goto out; - } - - cksem_init(&cs->sem); - cksem_post(&cs->sem); - - while (!connect_upstream(ckp, cs)) - cksleep_ms(5000); - - create_pthread(&pth, urecv_process, ckp); - cdata->upstream_sends = create_ckmsgq(ckp, "usender", &usend_process); - ret = true; -out: - return ret; -} - -static void client_message_processor(ckpool_t *ckp, json_t *json_msg) -{ - cdata_t *cdata = ckp->cdata; - client_instance_t *client; - int64_t client_id; - - /* Extract the client id from the json message and remove its entry */ - client_id = json_integer_value(json_object_get(json_msg, "client_id")); - json_object_del(json_msg, "client_id"); - /* Put client_id back in for a passthrough subclient, passing its - * upstream client_id instead of the passthrough's. 
*/ - if (subclient(client_id)) - json_object_set_new_nocheck(json_msg, "client_id", json_integer(client_id & 0xffffffffll)); - - /* Flag redirector clients once they've been authorised */ - if (ckp->redirector && (client = ref_client_by_id(cdata, client_id))) { - if (!client->redirected && !client->authorised) { - json_t *method_val = json_object_get(json_msg, "node.method"); - const char *method = json_string_value(method_val); - - if (!safecmp(method, stratum_msgs[SM_AUTHRESULT])) - client->authorised = true; - } - dec_instance_ref(cdata, client); - } - send_client_json(ckp, cdata, client_id, json_msg); -} - -void connector_add_message(ckpool_t *ckp, json_t *val) -{ - cdata_t *cdata = ckp->cdata; - - ckmsgq_add(cdata->cmpq, val); -} - -/* Send the passthrough the terminate node.method */ -static void drop_passthrough_client(ckpool_t *ckp, cdata_t *cdata, const int64_t id) -{ - int64_t client_id; - char *msg; - - LOGINFO("Asked to drop passthrough client %"PRId64", forwarding to passthrough", id); - client_id = id & 0xffffffffll; - /* We have a direct connection to the passthrough's connector so we - * can send it any regular commands. 
*/ - ASPRINTF(&msg, "dropclient=%"PRId64"\n", client_id); - send_client(ckp, cdata, id, msg); -} - -char *connector_stats(void *data, const int runtime) -{ - json_t *val = json_object(), *subval; - client_instance_t *client; - int objects, generated; - cdata_t *cdata = data; - sender_send_t *send; - int64_t memsize; - char *buf; - - /* If called in passthrough mode we log stats instead of the stratifier */ - if (runtime) - json_set_int(val, "runtime", runtime); - - ck_rlock(&cdata->lock); - objects = HASH_COUNT(cdata->clients); - memsize = SAFE_HASH_OVERHEAD(cdata->clients) + sizeof(client_instance_t) * objects; - generated = cdata->clients_generated; - ck_runlock(&cdata->lock); - - JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "clients", subval); - - ck_rlock(&cdata->lock); - DL_COUNT2(cdata->dead_clients, client, objects, dead_next); - generated = cdata->dead_generated; - ck_runlock(&cdata->lock); - - memsize = objects * sizeof(client_instance_t); - JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "dead", subval); - - objects = 0; - memsize = 0; - - mutex_lock(&cdata->sender_lock); - DL_FOREACH(cdata->sender_sends, send) { - objects++; - memsize += sizeof(sender_send_t) + send->len + 1; - } - JSON_CPACK(subval, "{si,si,si}", "count", objects, "memory", memsize, "generated", cdata->sends_generated); - json_set_object(val, "sends", subval); - - JSON_CPACK(subval, "{si,si,si}", "count", cdata->sends_queued, "memory", cdata->sends_size, "generated", cdata->sends_delayed); - mutex_unlock(&cdata->sender_lock); - - json_set_object(val, "delays", subval); - - buf = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - if (runtime) - LOGNOTICE("Passthrough:%s", buf); - else - LOGNOTICE("Connector stats: %s", buf); - return buf; -} - -void connector_send_fd(ckpool_t *ckp, const int fdno, const int sockd) -{ - 
cdata_t *cdata = ckp->cdata; - - if (fdno > -1 && fdno < ckp->serverurls) - send_fd(cdata->serverfd[fdno], sockd); - else - LOGWARNING("Connector asked to send invalid fd %d", fdno); -} - -static void connector_loop(proc_instance_t *pi, cdata_t *cdata) -{ - unix_msg_t *umsg = NULL; - ckpool_t *ckp = pi->ckp; - time_t last_stats; - int64_t client_id; - int ret = 0; - char *buf; - - last_stats = cdata->start_time; - -retry: - if (ckp->passthrough) { - time_t diff = time(NULL); - - if (diff - last_stats >= 60) { - last_stats = diff; - diff -= cdata->start_time; - buf = connector_stats(cdata, diff); - dealloc(buf); - } - } - - if (umsg) { - Close(umsg->sockd); - free(umsg->buf); - dealloc(umsg); - } - - do { - umsg = get_unix_msg(pi); - } while (!umsg); - - buf = umsg->buf; - LOGDEBUG("Connector received message: %s", buf); - /* The bulk of the messages will be json messages to send to clients - * so look for them first. */ - if (likely(buf[0] == '{')) { - json_t *val = json_loads(buf, JSON_DISABLE_EOF_CHECK, NULL); - - ckmsgq_add(cdata->cmpq, val); - } else if (cmdmatch(buf, "dropclient")) { - client_instance_t *client; - - ret = sscanf(buf, "dropclient=%"PRId64, &client_id); - if (ret < 0) { - LOGDEBUG("Connector failed to parse dropclient command: %s", buf); - goto retry; - } - /* A passthrough client */ - if (subclient(client_id)) { - drop_passthrough_client(ckp, cdata, client_id); - goto retry; - } - client = ref_client_by_id(cdata, client_id); - if (unlikely(!client)) { - LOGINFO("Connector failed to find client id %"PRId64" to drop", client_id); - goto retry; - } - ret = invalidate_client(ckp, cdata, client); - dec_instance_ref(cdata, client); - if (ret >= 0) - LOGINFO("Connector dropped client id: %"PRId64, client_id); - } else if (cmdmatch(buf, "testclient")) { - ret = sscanf(buf, "testclient=%"PRId64, &client_id); - if (unlikely(ret < 0)) { - LOGDEBUG("Connector failed to parse testclient command: %s", buf); - goto retry; - } - if (client_exists(cdata, 
client_id)) - goto retry; - LOGINFO("Connector detected non-existent client id: %"PRId64, client_id); - stratifier_drop_id(ckp, client_id); - } else if (cmdmatch(buf, "ping")) { - LOGDEBUG("Connector received ping request"); - send_unix_msg(umsg->sockd, "pong"); - } else if (cmdmatch(buf, "accept")) { - LOGDEBUG("Connector received accept signal"); - cdata->accept = true; - } else if (cmdmatch(buf, "reject")) { - LOGDEBUG("Connector received reject signal"); - cdata->accept = false; - if (ckp->passthrough) - drop_all_clients(cdata); - } else if (cmdmatch(buf, "stats")) { - char *msg; - - LOGDEBUG("Connector received stats request"); - msg = connector_stats(cdata, 0); - send_unix_msg(umsg->sockd, msg); - } else if (cmdmatch(buf, "loglevel")) { - sscanf(buf, "loglevel=%d", &ckp->loglevel); - } else if (cmdmatch(buf, "passthrough")) { - client_instance_t *client; - - ret = sscanf(buf, "passthrough=%"PRId64, &client_id); - if (ret < 0) { - LOGDEBUG("Connector failed to parse passthrough command: %s", buf); - goto retry; - } - client = ref_client_by_id(cdata, client_id); - if (unlikely(!client)) { - LOGINFO("Connector failed to find client id %"PRId64" to pass through", client_id); - goto retry; - } - passthrough_client(ckp, cdata, client); - dec_instance_ref(cdata, client); - } else if (cmdmatch(buf, "getxfd")) { - int fdno = -1; - - sscanf(buf, "getxfd%d", &fdno); - if (fdno > -1 && fdno < ckp->serverurls) - send_fd(cdata->serverfd[fdno], umsg->sockd); - } else - LOGWARNING("Unhandled connector message: %s", buf); - goto retry; -} - -void *connector(void *arg) -{ - proc_instance_t *pi = (proc_instance_t *)arg; - cdata_t *cdata = ckzalloc(sizeof(cdata_t)); - char newurl[INET6_ADDRSTRLEN], newport[8]; - int threads, sockd, i, tries = 0, ret; - ckpool_t *ckp = pi->ckp; - const int on = 1; - - rename_proc(pi->processname); - LOGWARNING("%s connector starting", ckp->name); - ckp->cdata = cdata; - cdata->ckp = ckp; - - if (!ckp->serverurls) { - /* No serverurls have been 
specified. Bind to all interfaces - * on default sockets. */ - struct sockaddr_in serv_addr; - - cdata->serverfd = ckalloc(sizeof(int *)); - - sockd = socket(AF_INET, SOCK_STREAM, 0); - if (sockd < 0) { - LOGERR("Connector failed to open socket"); - goto out; - } - setsockopt(sockd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); - memset(&serv_addr, 0, sizeof(serv_addr)); - serv_addr.sin_family = AF_INET; - serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); - serv_addr.sin_port = htons(ckp->proxy ? 3334 : 3333); - do { - ret = bind(sockd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)); - - if (!ret) - break; - LOGWARNING("Connector failed to bind to socket, retrying in 5s"); - sleep(5); - } while (++tries < 25); - if (ret < 0) { - LOGERR("Connector failed to bind to socket for 2 minutes"); - Close(sockd); - goto out; - } - /* Set listen backlog to larger than SOMAXCONN in case the - * system configuration supports it */ - if (listen(sockd, 8192) < 0) { - LOGERR("Connector failed to listen on socket"); - Close(sockd); - goto out; - } - cdata->serverfd[0] = sockd; - url_from_socket(sockd, newurl, newport); - ASPRINTF(&ckp->serverurl[0], "%s:%s", newurl, newport); - ckp->serverurls = 1; - } else { - cdata->serverfd = ckalloc(sizeof(int *) * ckp->serverurls); - - for (i = 0; i < ckp->serverurls; i++) { - char oldurl[INET6_ADDRSTRLEN], oldport[8]; - char *serverurl = ckp->serverurl[i]; - int port; - - if (!url_from_serverurl(serverurl, newurl, newport)) { - LOGWARNING("Failed to extract resolved url from %s", serverurl); - goto out; - } - port = atoi(newport); - /* All high port servers are treated as highdiff ports */ - if (port > 4000) { - LOGNOTICE("Highdiff server %s", serverurl); - ckp->server_highdiff[i] = true; - } - sockd = ckp->oldconnfd[i]; - if (url_from_socket(sockd, oldurl, oldport)) { - if (strcmp(newurl, oldurl) || strcmp(newport, oldport)) { - LOGWARNING("Handed over socket url %s:%s does not match config %s:%s, creating new socket", - oldurl, oldport, 
newurl, newport); - Close(sockd); - } - } - - do { - if (sockd > 0) - break; - sockd = bind_socket(newurl, newport); - if (sockd > 0) - break; - LOGWARNING("Connector failed to bind to socket, retrying in 5s"); - sleep(5); - } while (++tries < 25); - - if (sockd < 0) { - LOGERR("Connector failed to bind to socket for 2 minutes"); - goto out; - } - if (listen(sockd, 8192) < 0) { - LOGERR("Connector failed to listen on socket"); - Close(sockd); - goto out; - } - cdata->serverfd[i] = sockd; - } - } - - if (tries) - LOGWARNING("Connector successfully bound to socket"); - - cdata->cmpq = create_ckmsgq(ckp, "cmpq", &client_message_processor); - - if (ckp->remote && !setup_upstream(ckp, cdata)) - goto out; - - cklock_init(&cdata->lock); - cdata->pi = pi; - cdata->nfds = 0; - /* Set the client id to the highest serverurl count to distinguish - * them from the server fds in epoll. */ - cdata->client_ids = ckp->serverurls; - mutex_init(&cdata->sender_lock); - cond_init(&cdata->sender_cond); - create_pthread(&cdata->pth_sender, sender, cdata); - threads = sysconf(_SC_NPROCESSORS_ONLN) / 2 ? 
: 1; - cdata->cevents = create_ckmsgqs(ckp, "cevent", &client_event_processor, threads); - create_pthread(&cdata->pth_receiver, receiver, cdata); - cdata->start_time = time(NULL); - - ckp->connector_ready = true; - LOGWARNING("%s connector ready", ckp->name); - - connector_loop(pi, cdata); -out: - /* We should never get here unless there's a fatal error */ - LOGEMERG("Connector failure, shutting down"); - exit(1); - return NULL; -} diff --git a/solo-ckpool-source/src/connector.h b/solo-ckpool-source/src/connector.h deleted file mode 100644 index be945ef..0000000 --- a/solo-ckpool-source/src/connector.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2014-2016 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#ifndef CONNECTOR_H -#define CONNECTOR_H - -int64_t connector_newclientid(ckpool_t *ckp); -void connector_upstream_msg(ckpool_t *ckp, char *msg); -void connector_add_message(ckpool_t *ckp, json_t *val); -char *connector_stats(void *data, const int runtime); -void connector_send_fd(ckpool_t *ckp, const int fdno, const int sockd); -void *connector(void *arg); - -#endif /* CONNECTOR_H */ diff --git a/solo-ckpool-source/src/generator.c b/solo-ckpool-source/src/generator.c deleted file mode 100644 index 22e2e08..0000000 --- a/solo-ckpool-source/src/generator.c +++ /dev/null @@ -1,3422 +0,0 @@ -/* - * Copyright 2014-2017,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. 
- */ - -#include "config.h" - -#include -#include -#include -#include -#include - -#include "ckpool.h" -#include "libckpool.h" -#include "generator.h" -#include "stratifier.h" -#include "bitcoin.h" -#include "uthash.h" -#include "utlist.h" - -struct notify_instance { - /* Hash table data */ - UT_hash_handle hh; - int64_t id64; - - char prevhash[68]; - json_t *jobid; - char *coinbase1; - char *coinbase2; - int coinb1len; - int merkles; - char merklehash[16][68]; - char nbit[12]; - char ntime[12]; - char bbversion[12]; - bool clean; - - time_t notify_time; -}; - -typedef struct notify_instance notify_instance_t; - -typedef struct proxy_instance proxy_instance_t; - -struct share_msg { - UT_hash_handle hh; - int64_t id64; // Our own id for submitting upstream - - int64_t client_id; - time_t submit_time; - double diff; -}; - -typedef struct share_msg share_msg_t; - -struct stratum_msg { - struct stratum_msg *next; - struct stratum_msg *prev; - - json_t *json_msg; - int64_t client_id; -}; - -typedef struct stratum_msg stratum_msg_t; - -struct pass_msg { - proxy_instance_t *proxy; - connsock_t *cs; - char *msg; -}; - -typedef struct pass_msg pass_msg_t; -typedef struct cs_msg cs_msg_t; - -/* Statuses of various proxy states - connect, subscribe and auth */ -enum proxy_stat { - STATUS_INIT = 0, - STATUS_SUCCESS, - STATUS_FAIL -}; - -static const char *proxy_status[] = { - "Initial", - "Success", - "Failed" -}; - -/* Per proxied pool instance data */ -struct proxy_instance { - UT_hash_handle hh; /* Proxy list */ - UT_hash_handle sh; /* Subproxy list */ - proxy_instance_t *next; /* For dead proxy list */ - proxy_instance_t *prev; /* For dead proxy list */ - - ckpool_t *ckp; - connsock_t cs; - bool passthrough; - bool node; - int id; /* Proxy server id*/ - int subid; /* Subproxy id */ - int userid; /* User id if this proxy is bound to a user */ - - char *baseurl; - char *url; - char *auth; - char *pass; - - char *enonce1; - char *enonce1bin; - int nonce1len; - int nonce2len; 
- - tv_t last_message; - - double diff; - double diff_accepted; - double diff_rejected; - double total_accepted; /* Used only by parent proxy structures */ - double total_rejected; /* "" */ - tv_t last_share; - - /* Diff shares per second for 1/5/60... minute rolling averages */ - double dsps1; - double dsps5; - double dsps60; - double dsps360; - double dsps1440; - tv_t last_decay; - - /* Total diff shares per second for all subproxies */ - double tdsps1; /* Used only by parent proxy structures */ - double tdsps5; /* "" */ - double tdsps60; /* "" */ - double tdsps360; /* "" */ - double tdsps1440; /* "" */ - tv_t total_last_decay; - - bool no_params; /* Doesn't want any parameters on subscribe */ - - bool global; /* Part of the global list of proxies */ - bool disabled; /* Subproxy no longer to be used */ - bool reconnect; /* We need to drop and reconnect */ - bool reconnecting; /* Testing of parent in progress */ - int64_t recruit; /* No of recruiting requests in progress */ - bool alive; - bool authorised; - - /* Which of STATUS_* states are these in */ - enum proxy_stat connect_status; - enum proxy_stat subscribe_status; - enum proxy_stat auth_status; - - /* Back off from retrying if we fail one of the above */ - int backoff; - - /* Are we in the middle of a blocked write of this message? 
*/ - cs_msg_t *sending; - - pthread_t pth_precv; - - ckmsgq_t *passsends; // passthrough sends - - char_entry_t *recvd_lines; /* Linked list of unprocessed messages */ - - int epfd; /* Epoll fd used by the parent proxy */ - - mutex_t proxy_lock; /* Lock protecting hashlist of proxies */ - proxy_instance_t *parent; /* Parent proxy of subproxies */ - proxy_instance_t *subproxies; /* Hashlist of subproxies of this proxy */ - int64_t clients_per_proxy; /* Max number of clients of this proxy */ - int subproxy_count; /* Number of subproxies */ -}; - -/* Private data for the generator */ -struct generator_data { - ckpool_t *ckp; - mutex_t lock; /* Lock protecting linked lists */ - proxy_instance_t *proxies; /* Hash list of all proxies */ - proxy_instance_t *dead_proxies; /* Disabled proxies */ - int proxies_generated; - int subproxies_generated; - - int64_t proxy_notify_id; // Globally increasing notify id - pthread_t pth_uprecv; // User proxy receive thread - pthread_t pth_psend; // Combined proxy send thread - - mutex_t psend_lock; // Lock associated with conditional below - pthread_cond_t psend_cond; - - stratum_msg_t *psends; - int psends_generated; - - mutex_t notify_lock; - notify_instance_t *notify_instances; - - mutex_t share_lock; - share_msg_t *shares; - int64_t share_id; - - server_instance_t *current_si; // Current server instance - - proxy_instance_t *current_proxy; -}; - -typedef struct generator_data gdata_t; - -/* Use a temporary fd when testing server_alive to avoid races on cs->fd */ -static bool server_alive(ckpool_t *ckp, server_instance_t *si, bool pinging) -{ - char *userpass = NULL; - bool ret = false; - connsock_t *cs; - gbtbase_t gbt; - int fd; - - if (si->alive) - return true; - cs = &si->cs; - if (!extract_sockaddr(si->url, &cs->url, &cs->port)) { - LOGWARNING("Failed to extract address from %s", si->url); - return ret; - } - userpass = strdup(si->auth); - realloc_strcat(&userpass, ":"); - realloc_strcat(&userpass, si->pass); - 
dealloc(cs->auth); - cs->auth = http_base64(userpass); - if (!cs->auth) { - LOGWARNING("Failed to create base64 auth from %s", userpass); - dealloc(userpass); - return ret; - } - dealloc(userpass); - - fd = connect_socket(cs->url, cs->port); - if (fd < 0) { - if (!pinging) - LOGWARNING("Failed to connect socket to %s:%s !", cs->url, cs->port); - return ret; - } - - /* Test we can connect, authorise and get a block template */ - if (!gen_gbtbase(cs, &gbt)) { - if (!pinging) { - LOGINFO("Failed to get test block template from %s:%s!", - cs->url, cs->port); - } - goto out; - } - clear_gbtbase(&gbt); - if (unlikely(ckp->btcsolo && !ckp->btcaddress)) { - /* If no btcaddress is specified in solobtc mode, choose one of - * the donation addresses from mainnet, testnet, or regtest for - * coinbase validation later on, although it will not be used - * for mining. */ - if (validate_address(cs, ckp->donaddress, &ckp->script, &ckp->segwit)) - ckp->btcaddress = ckp->donaddress; - else if (validate_address(cs, ckp->tndonaddress, &ckp->script, &ckp->segwit)) - ckp->btcaddress = ckp->tndonaddress; - else if (validate_address(cs, ckp->rtdonaddress, &ckp->script, &ckp->segwit)) - ckp->btcaddress = ckp->rtdonaddress; - } - - if (!ckp->node && !validate_address(cs, ckp->btcaddress, &ckp->script, &ckp->segwit)) { - LOGWARNING("Invalid btcaddress: %s !", ckp->btcaddress); - goto out; - } - si->alive = cs->alive = ret = true; - LOGNOTICE("Server alive: %s:%s", cs->url, cs->port); -out: - /* Close the file handle */ - close(fd); - return ret; -} - -/* Find the highest priority server alive and return it */ -static server_instance_t *live_server(ckpool_t *ckp, gdata_t *gdata) -{ - server_instance_t *alive = NULL; - connsock_t *cs; - int i; - - LOGDEBUG("Attempting to connect to bitcoind"); -retry: - /* First find a server that is already flagged alive if possible - * without blocking on server_alive() */ - for (i = 0; i < ckp->btcds; i++) { - server_instance_t *si = ckp->servers[i]; - cs = 
&si->cs; - - if (si->alive) { - alive = si; - goto living; - } - } - - /* No servers flagged alive, try to connect to them blocking */ - for (i = 0; i < ckp->btcds; i++) { - server_instance_t *si = ckp->servers[i]; - - if (server_alive(ckp, si, false)) { - alive = si; - goto living; - } - } - LOGWARNING("CRITICAL: No bitcoinds active!"); - sleep(5); - goto retry; -living: - gdata->current_si = alive; - cs = &alive->cs; - LOGINFO("Connected to live server %s:%s", cs->url, cs->port); - send_proc(ckp->connector, alive ? "accept" : "reject"); - return alive; -} - -static void kill_server(server_instance_t *si) -{ - connsock_t *cs; - - if (!si) // This shouldn't happen - return; - - LOGNOTICE("Killing server"); - cs = &si->cs; - Close(cs->fd); - empty_buffer(cs); - dealloc(cs->url); - dealloc(cs->port); - dealloc(cs->auth); -} - -static void clear_unix_msg(unix_msg_t **umsg) -{ - if (*umsg) { - Close((*umsg)->sockd); - free((*umsg)->buf); - free(*umsg); - *umsg = NULL; - } -} - -bool generator_submitblock(ckpool_t *ckp, const char *buf) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - bool warn = false; - connsock_t *cs; - - while (unlikely(!(si = gdata->current_si))) { - if (!warn) - LOGWARNING("No live current server in generator_blocksubmit! 
Resubmitting indefinitely!"); - warn = true; - cksleep_ms(10); - } - cs = &si->cs; - LOGNOTICE("Submitting block data!"); - return submit_block(cs, buf); -} - -void generator_preciousblock(ckpool_t *ckp, const char *hash) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - connsock_t *cs; - - if (unlikely(!(si = gdata->current_si))) { - LOGWARNING("No live current server in generator_get_blockhash"); - return; - } - cs = &si->cs; - precious_block(cs, hash); -} - -bool generator_get_blockhash(ckpool_t *ckp, int height, char *hash) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - connsock_t *cs; - - if (unlikely(!(si = gdata->current_si))) { - LOGWARNING("No live current server in generator_get_blockhash"); - return false; - } - cs = &si->cs; - return get_blockhash(cs, height, hash); -} - -static void gen_loop(proc_instance_t *pi) -{ - server_instance_t *si = NULL, *old_si; - unix_msg_t *umsg = NULL; - ckpool_t *ckp = pi->ckp; - char *buf = NULL; - connsock_t *cs; - gbtbase_t gbt; - char hash[68]; - -reconnect: - clear_unix_msg(&umsg); - old_si = si; - si = live_server(ckp, ckp->gdata); - if (!si) - goto out; - if (unlikely(!ckp->generator_ready)) { - ckp->generator_ready = true; - LOGWARNING("%s generator ready", ckp->name); - } - - cs = &si->cs; - if (!old_si) - LOGWARNING("Connected to bitcoind: %s:%s", cs->url, cs->port); - else if (si != old_si) - LOGWARNING("Failed over to bitcoind: %s:%s", cs->url, cs->port); - -retry: - clear_unix_msg(&umsg); - - do { - umsg = get_unix_msg(pi); - } while (!umsg); - - if (unlikely(!si->alive)) { - LOGWARNING("%s:%s Bitcoind socket invalidated, will attempt failover", cs->url, cs->port); - goto reconnect; - } - - buf = umsg->buf; - LOGDEBUG("Generator received request: %s", buf); - if (cmdmatch(buf, "getbase")) { - if (!gen_gbtbase(cs, &gbt)) { - LOGWARNING("Failed to get block template from %s:%s", - cs->url, cs->port); - si->alive = cs->alive = false; - send_unix_msg(umsg->sockd, "Failed"); - goto 
reconnect; - } else { - char *s = json_dumps(gbt.json, JSON_NO_UTF8); - - send_unix_msg(umsg->sockd, s); - free(s); - clear_gbtbase(&gbt); - } - } else if (cmdmatch(buf, "getbest")) { - if (si->notify) - send_unix_msg(umsg->sockd, "notify"); - else if (!get_bestblockhash(cs, hash)) { - LOGINFO("No best block hash support from %s:%s", - cs->url, cs->port); - si->alive = cs->alive = false; - send_unix_msg(umsg->sockd, "failed"); - } else { - send_unix_msg(umsg->sockd, hash); - } - } else if (cmdmatch(buf, "getlast")) { - int height; - - if (si->notify) - send_unix_msg(umsg->sockd, "notify"); - else if ((height = get_blockcount(cs)) == -1) { - si->alive = cs->alive = false; - send_unix_msg(umsg->sockd, "failed"); - goto reconnect; - } else { - LOGDEBUG("Height: %d", height); - if (!get_blockhash(cs, height, hash)) { - si->alive = cs->alive = false; - send_unix_msg(umsg->sockd, "failed"); - goto reconnect; - } else { - send_unix_msg(umsg->sockd, hash); - LOGDEBUG("Hash: %s", hash); - } - } - } else if (cmdmatch(buf, "submitblock:")) { - char blockmsg[80]; - bool ret; - - LOGNOTICE("Submitting block data!"); - ret = submit_block(cs, buf + 12 + 64 + 1); - memset(buf + 12 + 64, 0, 1); - sprintf(blockmsg, "%sblock:%s", ret ? 
"" : "no", buf + 12); - send_proc(ckp->stratifier, blockmsg); - } else if (cmdmatch(buf, "reconnect")) { - goto reconnect; - } else if (cmdmatch(buf, "loglevel")) { - sscanf(buf, "loglevel=%d", &ckp->loglevel); - } else if (cmdmatch(buf, "ping")) { - LOGDEBUG("Generator received ping request"); - send_unix_msg(umsg->sockd, "pong"); - } - goto retry; - -out: - kill_server(si); -} - -static bool connect_proxy(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxy) -{ - if (cs->fd > 0) { - epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, cs->fd, NULL); - Close(cs->fd); - } - cs->fd = connect_socket(cs->url, cs->port); - if (cs->fd < 0) { - LOGINFO("Failed to connect socket to %s:%s in connect_proxy", - cs->url, cs->port); - return false; - } - keep_sockalive(cs->fd); - if (!ckp->passthrough) { - struct epoll_event event; - - event.events = EPOLLIN | EPOLLRDHUP; - event.data.ptr = proxy; - /* Add this connsock_t to the epoll list */ - if (unlikely(epoll_ctl(proxy->epfd, EPOLL_CTL_ADD, cs->fd, &event) == -1)) { - LOGERR("Failed to add fd %d to epfd %d to epoll_ctl in proxy_alive", - cs->fd, proxy->epfd); - return false; - } - } else { - /* We want large send/recv buffers on passthroughs */ - if (!ckp->rmem_warn) - cs->rcvbufsiz = set_recvbufsize(ckp, cs->fd, 1048576); - if (!ckp->wmem_warn) - cs->sendbufsiz = set_sendbufsize(ckp, cs->fd, 1048576); - } - return true; -} - -/* For some reason notify is buried at various different array depths so use - * a reentrant function to try and find it. 
*/ -static json_t *find_notify(json_t *val) -{ - int arr_size, i; - json_t *ret = NULL; - const char *entry; - - if (!json_is_array(val)) - return NULL; - arr_size = json_array_size(val); - entry = json_string_value(json_array_get(val, 0)); - if (cmdmatch(entry, "mining.notify")) - return val; - for (i = 0; i < arr_size; i++) { - json_t *arr_val; - - arr_val = json_array_get(val, i); - ret = find_notify(arr_val); - if (ret) - break; - } - return ret; -} - -/* Get stored line in the proxy linked list of messages if any exist or NULL */ -static char *cached_proxy_line(proxy_instance_t *proxi) -{ - char *buf = NULL; - - if (proxi->recvd_lines) { - char_entry_t *char_t = proxi->recvd_lines; - - DL_DELETE(proxi->recvd_lines, char_t); - buf = char_t->buf; - free(char_t); - } - return buf; -} - -/* Get next line in the proxy linked list of messages or a new line from the - * connsock if there are none. */ -static char *next_proxy_line(connsock_t *cs, proxy_instance_t *proxi) -{ - char *buf = cached_proxy_line(proxi); - float timeout = 10; - - if (!buf && read_socket_line(cs, &timeout) > 0) - buf = strdup(cs->buf); - return buf; -} - -/* For appending a line to the proxy recv list */ -static void append_proxy_line(proxy_instance_t *proxi, const char *buf) -{ - char_entry_t *char_t = ckalloc(sizeof(char_entry_t)); - char_t->buf = strdup(buf); - DL_APPEND(proxi->recvd_lines, char_t); -} - -/* Get a new line from the connsock and return a copy of it */ -static char *new_proxy_line(connsock_t *cs) -{ - float timeout = 10; - char *buf = NULL; - - if (read_socket_line(cs, &timeout) < 1) - goto out; - buf = strdup(cs->buf); -out: - return buf; -} - -static inline bool parent_proxy(const proxy_instance_t *proxy) -{ - return (proxy->parent == proxy); -} - -static void recruit_subproxies(proxy_instance_t *proxi, const int recruits); - -static bool parse_subscribe(connsock_t *cs, proxy_instance_t *proxi) -{ - json_t *val = NULL, *res_val, *notify_val, *tmp; - bool parsed, ret = 
false; - int retries = 0, size; - const char *string; - char *buf, *old; - -retry: - parsed = true; - if (!(buf = new_proxy_line(cs))) { - LOGNOTICE("Proxy %d:%d %s failed to receive line in parse_subscribe", - proxi->id, proxi->subid, proxi->url); - goto out; - } - LOGDEBUG("parse_subscribe received %s", buf); - /* Ignore err_val here stored in &tmp */ - val = json_msg_result(buf, &res_val, &tmp); - if (!val || !res_val) { - LOGINFO("Failed to get a json result in parse_subscribe, got: %s", buf); - parsed = false; - } - if (!json_is_array(res_val)) { - LOGINFO("Result in parse_subscribe not an array"); - parsed = false; - } - size = json_array_size(res_val); - if (size < 3) { - LOGINFO("Result in parse_subscribe array too small"); - parsed = false; - } - notify_val = find_notify(res_val); - if (!notify_val) { - LOGINFO("Failed to find notify in parse_subscribe"); - parsed = false; - } - if (!parsed) { - if (++retries < 3) { - /* We don't want this response so put it on the proxy - * recvd list to be parsed later */ - append_proxy_line(proxi, buf); - buf = NULL; - goto retry; - } - LOGNOTICE("Proxy %d:%d %s failed to parse subscribe response in parse_subscribe", - proxi->id, proxi->subid, proxi->url); - goto out; - } - - tmp = json_array_get(res_val, 1); - if (!tmp || !json_is_string(tmp)) { - LOGWARNING("Failed to parse enonce1 in parse_subscribe"); - goto out; - } - string = json_string_value(tmp); - old = proxi->enonce1; - proxi->enonce1 = strdup(string); - free(old); - proxi->nonce1len = strlen(proxi->enonce1) / 2; - if (proxi->nonce1len > 15) { - LOGWARNING("Nonce1 too long at %d", proxi->nonce1len); - goto out; - } - old = proxi->enonce1bin; - proxi->enonce1bin = ckalloc(proxi->nonce1len); - free(old); - hex2bin(proxi->enonce1bin, proxi->enonce1, proxi->nonce1len); - tmp = json_array_get(res_val, 2); - if (!tmp || !json_is_integer(tmp)) { - LOGWARNING("Failed to parse nonce2len in parse_subscribe"); - goto out; - } - size = json_integer_value(tmp); - if (size 
< 1 || size > 8) { - LOGWARNING("Invalid nonce2len %d in parse_subscribe", size); - goto out; - } - if (size < 3) { - if (!proxi->subid) { - LOGWARNING("Proxy %d %s Nonce2 length %d too small for fast miners", - proxi->id, proxi->url, size); - } else { - LOGNOTICE("Proxy %d:%d Nonce2 length %d too small for fast miners", - proxi->id, proxi->subid, size); - } - } - proxi->nonce2len = size; - proxi->clients_per_proxy = 1ll << ((size - 3) * 8); - - LOGNOTICE("Found notify for new proxy %d:%d with enonce %s nonce2len %d", proxi->id, - proxi->subid, proxi->enonce1, proxi->nonce2len); - ret = true; - -out: - if (val) - json_decref(val); - free(buf); - return ret; -} - -/* cs semaphore must be held */ -static bool subscribe_stratum(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxi) -{ - bool ret = false; - json_t *req; - -retry: - /* Attempt to connect with the client description g*/ - if (!proxi->no_params) { - JSON_CPACK(req, "{s:i,s:s,s:[s]}", - "id", 0, - "method", "mining.subscribe", - "params", PACKAGE"/"VERSION); - /* Then try without any parameters */ - } else { - JSON_CPACK(req, "{s:i,s:s,s:[]}", - "id", 0, - "method", "mining.subscribe", - "params"); - } - ret = send_json_msg(cs, req); - json_decref(req); - if (!ret) { - LOGNOTICE("Proxy %d:%d %s failed to send message in subscribe_stratum", - proxi->id, proxi->subid, proxi->url); - goto out; - } - ret = parse_subscribe(cs, proxi); - if (ret) - goto out; - - if (proxi->no_params) { - LOGNOTICE("Proxy %d:%d %s failed all subscription options in subscribe_stratum", - proxi->id, proxi->subid, proxi->url); - goto out; - } - LOGINFO("Proxy %d:%d %s failed connecting with parameters in subscribe_stratum, retrying without", - proxi->id, proxi->subid, proxi->url); - proxi->no_params = true; - ret = connect_proxy(ckp, cs, proxi); - if (!ret) { - LOGNOTICE("Proxy %d:%d %s failed to reconnect in subscribe_stratum", - proxi->id, proxi->subid, proxi->url); - goto out; - } - goto retry; - -out: - if (!ret && cs->fd > 0) 
{ - epoll_ctl(proxi->epfd, EPOLL_CTL_DEL, cs->fd, NULL); - Close(cs->fd); - } - return ret; -} - -/* cs semaphore must be held */ -static bool passthrough_stratum(connsock_t *cs, proxy_instance_t *proxi) -{ - json_t *req, *val = NULL, *res_val, *err_val; - bool res, ret = false; - float timeout = 10; - - JSON_CPACK(req, "{ss,s[s]}", - "method", "mining.passthrough", - "params", PACKAGE"/"VERSION); - res = send_json_msg(cs, req); - json_decref(req); - if (!res) { - LOGWARNING("Failed to send message in passthrough_stratum"); - goto out; - } - if (read_socket_line(cs, &timeout) < 1) { - LOGWARNING("Failed to receive line in passthrough_stratum"); - goto out; - } - /* Ignore err_val here since we should always get a result from an - * upstream passthrough server */ - val = json_msg_result(cs->buf, &res_val, &err_val); - if (!val || !res_val) { - LOGWARNING("Failed to get a json result in passthrough_stratum, got: %s", - cs->buf); - goto out; - } - ret = json_is_true(res_val); - if (!ret) { - LOGWARNING("Denied passthrough for stratum"); - goto out; - } - proxi->passthrough = true; -out: - if (val) - json_decref(val); - if (!ret) - Close(cs->fd); - return ret; -} - -/* cs semaphore must be held */ -static bool node_stratum(connsock_t *cs, proxy_instance_t *proxi) -{ - json_t *req, *val = NULL, *res_val, *err_val; - bool res, ret = false; - float timeout = 10; - - JSON_CPACK(req, "{ss,s[s]}", - "method", "mining.node", - "params", PACKAGE"/"VERSION); - - res = send_json_msg(cs, req); - json_decref(req); - if (!res) { - LOGWARNING("Failed to send message in node_stratum"); - goto out; - } - if (read_socket_line(cs, &timeout) < 1) { - LOGWARNING("Failed to receive line in node_stratum"); - goto out; - } - /* Ignore err_val here since we should always get a result from an - * upstream server */ - val = json_msg_result(cs->buf, &res_val, &err_val); - if (!val || !res_val) { - LOGWARNING("Failed to get a json result in node_stratum, got: %s", - cs->buf); - goto out; - } - 
ret = json_is_true(res_val); - if (!ret) { - LOGWARNING("Denied node setup for stratum"); - goto out; - } - proxi->node = true; -out: - if (val) - json_decref(val); - if (!ret) - Close(cs->fd); - return ret; -} - -static void send_notify(ckpool_t *ckp, proxy_instance_t *proxi, notify_instance_t *ni); - -static void reconnect_generator(ckpool_t *ckp) -{ - send_proc(ckp->generator, "reconnect"); -} - -struct genwork *generator_getbase(ckpool_t *ckp) -{ - gdata_t *gdata = ckp->gdata; - gbtbase_t *gbt = NULL; - server_instance_t *si; - connsock_t *cs; - - /* Use temporary variables to prevent deref while accessing */ - si = gdata->current_si; - if (unlikely(!si)) { - LOGWARNING("No live current server in generator_genbase"); - goto out; - } - cs = &si->cs; - gbt = ckzalloc(sizeof(gbtbase_t)); - if (unlikely(!gen_gbtbase(cs, gbt))) { - LOGWARNING("Failed to get block template from %s:%s", cs->url, cs->port); - si->alive = cs->alive = false; - reconnect_generator(ckp); - dealloc(gbt); - } -out: - return gbt; -} - -int generator_getbest(ckpool_t *ckp, char *hash) -{ - gdata_t *gdata = ckp->gdata; - int ret = GETBEST_FAILED; - server_instance_t *si; - connsock_t *cs; - - si = gdata->current_si; - if (unlikely(!si)) { - LOGWARNING("No live current server in generator_getbest"); - goto out; - } - if (si->notify) { - ret = GETBEST_NOTIFY; - goto out; - } - cs = &si->cs; - if (unlikely(!get_bestblockhash(cs, hash))) { - LOGWARNING("Failed to get best block hash from %s:%s", cs->url, cs->port); - goto out; - } - ret = GETBEST_SUCCESS; -out: - return ret; -} - -bool generator_checkaddr(ckpool_t *ckp, const char *addr, bool *script, bool *segwit) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - int ret = false; - connsock_t *cs; - - si = gdata->current_si; - if (unlikely(!si)) { - LOGWARNING("No live current server in generator_checkaddr"); - goto out; - } - cs = &si->cs; - ret = validate_address(cs, addr, script, segwit); -out: - return ret; -} - -bool 
generator_checktxn(const ckpool_t *ckp, const char *txn, json_t **val) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - bool ret = false; - connsock_t *cs; - - si = gdata->current_si; - if (unlikely(!si)) { - LOGWARNING("No live current server in generator_checkaddr"); - goto out; - } - cs = &si->cs; - *val = validate_txn(cs, txn); - if (*val) - ret = true; -out: - return ret; -} - -char *generator_get_txn(ckpool_t *ckp, const char *hash) -{ - gdata_t *gdata = ckp->gdata; - server_instance_t *si; - char *ret = NULL; - connsock_t *cs; - - si = gdata->current_si; - if (unlikely(!si)) { - LOGWARNING("No live current server in generator_get_txn"); - goto out; - } - cs = &si->cs; - ret = get_txn(cs, hash); -out: - return ret; -} - -static bool parse_notify(ckpool_t *ckp, proxy_instance_t *proxi, json_t *val) -{ - const char *prev_hash, *bbversion, *nbit, *ntime; - gdata_t *gdata = proxi->ckp->gdata; - char *coinbase1, *coinbase2; - const char *jobidbuf; - bool clean, ret = false; - notify_instance_t *ni; - json_t *arr, *job_id; - int merkles, i; - - arr = json_array_get(val, 4); - if (!arr || !json_is_array(arr)) - goto out; - - merkles = json_array_size(arr); - job_id = json_copy(json_array_get(val, 0)); - prev_hash = __json_array_string(val, 1); - coinbase1 = json_array_string(val, 2); - coinbase2 = json_array_string(val, 3); - bbversion = __json_array_string(val, 5); - nbit = __json_array_string(val, 6); - ntime = __json_array_string(val, 7); - clean = json_is_true(json_array_get(val, 8)); - if (!job_id || !prev_hash || !coinbase1 || !coinbase2 || !bbversion || !nbit || !ntime) { - if (job_id) - json_decref(job_id); - if (coinbase1) - free(coinbase1); - if (coinbase2) - free(coinbase2); - goto out; - } - - LOGDEBUG("Received new notify from proxy %d:%d", proxi->id, proxi->subid); - ni = ckzalloc(sizeof(notify_instance_t)); - ni->jobid = job_id; - jobidbuf = json_string_value(job_id); - LOGDEBUG("JobID %s", jobidbuf); - ni->coinbase1 = coinbase1; - 
LOGDEBUG("Coinbase1 %s", coinbase1); - ni->coinb1len = strlen(coinbase1) / 2; - ni->coinbase2 = coinbase2; - LOGDEBUG("Coinbase2 %s", coinbase2); - memcpy(ni->prevhash, prev_hash, 65); - LOGDEBUG("Prevhash %s", prev_hash); - memcpy(ni->bbversion, bbversion, 9); - LOGDEBUG("BBVersion %s", bbversion); - memcpy(ni->nbit, nbit, 9); - LOGDEBUG("Nbit %s", nbit); - memcpy(ni->ntime, ntime, 9); - LOGDEBUG("Ntime %s", ntime); - ni->clean = clean; - LOGDEBUG("Clean %s", clean ? "true" : "false"); - LOGDEBUG("Merkles %d", merkles); - for (i = 0; i < merkles; i++) { - const char *merkle = __json_array_string(arr, i); - - LOGDEBUG("Merkle %d %s", i, merkle); - memcpy(&ni->merklehash[i][0], merkle, 65); - } - ni->merkles = merkles; - ret = true; - ni->notify_time = time(NULL); - - /* Add the notify instance to the parent proxy list, not the subproxy */ - mutex_lock(&gdata->notify_lock); - ni->id64 = gdata->proxy_notify_id++; - HASH_ADD_I64(gdata->notify_instances, id64, ni); - mutex_unlock(&gdata->notify_lock); - - send_notify(ckp, proxi, ni); -out: - return ret; -} - -static bool parse_diff(proxy_instance_t *proxi, json_t *val) -{ - double diff = json_number_value(json_array_get(val, 0)); - - if (diff == 0 || diff == proxi->diff) - return true; - proxi->diff = diff; - return true; -} - -static bool send_version(proxy_instance_t *proxi, json_t *val) -{ - json_t *json_msg, *id_val = json_object_dup(val, "id"); - bool ret; - - JSON_CPACK(json_msg, "{sossso}", "id", id_val, "result", PACKAGE"/"VERSION, - "error", json_null()); - ret = send_json_msg(&proxi->cs, json_msg); - json_decref(json_msg); - return ret; -} - -static bool show_message(json_t *val) -{ - const char *msg; - - if (!json_is_array(val)) - return false; - msg = json_string_value(json_array_get(val, 0)); - if (!msg) - return false; - LOGNOTICE("Pool message: %s", msg); - return true; -} - -static bool send_pong(proxy_instance_t *proxi, json_t *val) -{ - json_t *json_msg, *id_val = json_object_dup(val, "id"); - bool 
ret; - - JSON_CPACK(json_msg, "{sossso}", "id", id_val, "result", "pong", - "error", json_null()); - ret = send_json_msg(&proxi->cs, json_msg); - json_decref(json_msg); - return ret; -} - -static void prepare_proxy(proxy_instance_t *proxi); - -/* Creates a duplicate instance or proxi to be used as a subproxy, ignoring - * fields we don't use in the subproxy. */ -static proxy_instance_t *create_subproxy(ckpool_t *ckp, gdata_t *gdata, proxy_instance_t *proxi, - const char *url, const char *baseurl) -{ - proxy_instance_t *subproxy; - - mutex_lock(&gdata->lock); - if (gdata->dead_proxies) { - /* Recycle an old proxy instance if one exists */ - subproxy = gdata->dead_proxies; - DL_DELETE(gdata->dead_proxies, subproxy); - } else { - gdata->subproxies_generated++; - subproxy = ckzalloc(sizeof(proxy_instance_t)); - } - mutex_unlock(&gdata->lock); - - subproxy->cs.ckp = subproxy->ckp = ckp; - - mutex_lock(&proxi->proxy_lock); - subproxy->subid = ++proxi->subproxy_count; - mutex_unlock(&proxi->proxy_lock); - - subproxy->id = proxi->id; - subproxy->userid = proxi->userid; - subproxy->global = proxi->global; - subproxy->url = strdup(url); - subproxy->baseurl = strdup(baseurl); - subproxy->auth = strdup(proxi->auth); - subproxy->pass = strdup(proxi->pass); - subproxy->parent = proxi; - subproxy->epfd = proxi->epfd; - cksem_init(&subproxy->cs.sem); - cksem_post(&subproxy->cs.sem); - return subproxy; -} - -static void add_subproxy(proxy_instance_t *proxi, proxy_instance_t *subproxy) -{ - mutex_lock(&proxi->proxy_lock); - HASH_ADD(sh, proxi->subproxies, subid, sizeof(int), subproxy); - mutex_unlock(&proxi->proxy_lock); -} - -static proxy_instance_t *__subproxy_by_id(proxy_instance_t *proxy, const int subid) -{ - proxy_instance_t *subproxy; - - HASH_FIND(sh, proxy->subproxies, &subid, sizeof(int), subproxy); - return subproxy; -} - -/* Add to the dead list to be recycled if possible */ -static void store_proxy(gdata_t *gdata, proxy_instance_t *proxy) -{ - LOGINFO("Recycling data 
from proxy %d:%d", proxy->id, proxy->subid); - - mutex_lock(&gdata->lock); - dealloc(proxy->enonce1); - dealloc(proxy->url); - dealloc(proxy->baseurl); - dealloc(proxy->auth); - dealloc(proxy->pass); - memset(proxy, 0, sizeof(proxy_instance_t)); - DL_APPEND(gdata->dead_proxies, proxy); - mutex_unlock(&gdata->lock); -} - -/* The difference between a dead proxy and a deleted one is the parent proxy entry - * is not removed from the stratifier as it assumes it is down whereas a deleted - * proxy has had its entry removed from the generator. */ -static void send_stratifier_deadproxy(ckpool_t *ckp, const int id, const int subid) -{ - char buf[256]; - - if (ckp->passthrough) - return; - sprintf(buf, "deadproxy=%d:%d", id, subid); - send_proc(ckp->stratifier, buf); -} - -static void send_stratifier_delproxy(ckpool_t *ckp, const int id, const int subid) -{ - char buf[256]; - - if (ckp->passthrough) - return; - sprintf(buf, "delproxy=%d:%d", id, subid); - send_proc(ckp->stratifier, buf); -} - -/* Close the subproxy socket if it's open and remove it from the epoll list */ -static void close_proxy_socket(proxy_instance_t *proxy, proxy_instance_t *subproxy) -{ - if (subproxy->cs.fd > 0) { - epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, subproxy->cs.fd, NULL); - Close(subproxy->cs.fd); - } -} - -/* Remove the subproxy from the proxi list and put it on the dead list. - * Further use of the subproxy pointer may point to a new proxy but will not - * dereference. This will only disable subproxies so parent proxies need to - * have their disabled bool set manually. 
*/ -static void disable_subproxy(gdata_t *gdata, proxy_instance_t *proxi, proxy_instance_t *subproxy) -{ - subproxy->alive = false; - send_stratifier_deadproxy(gdata->ckp, subproxy->id, subproxy->subid); - close_proxy_socket(proxi, subproxy); - if (parent_proxy(subproxy)) - return; - - subproxy->disabled = true; - - mutex_lock(&proxi->proxy_lock); - /* Make sure subproxy is still in the list */ - subproxy = __subproxy_by_id(proxi, subproxy->subid); - if (likely(subproxy)) - HASH_DELETE(sh, proxi->subproxies, subproxy); - mutex_unlock(&proxi->proxy_lock); - - if (subproxy) { - send_stratifier_deadproxy(gdata->ckp, subproxy->id, subproxy->subid); - store_proxy(gdata, subproxy); - } -} - -static bool parse_reconnect(proxy_instance_t *proxy, json_t *val) -{ - bool sameurl = false, ret = false; - ckpool_t *ckp = proxy->ckp; - gdata_t *gdata = ckp->gdata; - proxy_instance_t *parent; - const char *new_url; - int new_port; - char *url; - - new_url = json_string_value(json_array_get(val, 0)); - new_port = json_integer_value(json_array_get(val, 1)); - /* See if we have an invalid entry listing port as a string instead of - * integer and handle that. 
*/ - if (!new_port) { - const char *newport_string = json_string_value(json_array_get(val, 1)); - - if (newport_string) - sscanf(newport_string, "%d", &new_port); - } - if (new_url && strlen(new_url) && new_port) { - char *dot_pool, *dot_reconnect; - int len; - - dot_pool = strchr(proxy->url, '.'); - if (!dot_pool) { - LOGWARNING("Denied stratum reconnect request from server without domain %s", - proxy->url); - goto out; - } - dot_reconnect = strchr(new_url, '.'); - if (!dot_reconnect) { - LOGWARNING("Denied stratum reconnect request to url without domain %s", - new_url); - goto out; - } - len = strlen(dot_reconnect); - if (strncmp(dot_pool, dot_reconnect, len)) { - LOGWARNING("Denied stratum reconnect request from %s to non-matching domain %s", - proxy->url, new_url); - goto out; - } - ASPRINTF(&url, "%s:%d", new_url, new_port); - } else { - url = strdup(proxy->url); - sameurl = true; - } - LOGINFO("Processing reconnect request to %s", url); - - ret = true; - parent = proxy->parent; - disable_subproxy(gdata, parent, proxy); - if (parent != proxy) { - /* If this is a subproxy we only need to create a new one if - * the url has changed. Otherwise automated recruiting will - * take care of creating one if needed. 
*/ - if (!sameurl) - create_subproxy(ckp, gdata, parent, url, parent->baseurl); - goto out; - } - - proxy->reconnect = true; - LOGWARNING("Proxy %d:%s reconnect issue to %s, dropping existing connection", - proxy->id, proxy->url, url); - if (!sameurl) { - char *oldurl = proxy->url; - - proxy->url = url; - free(oldurl); - } else - free(url); -out: - return ret; -} - -static void send_diff(ckpool_t *ckp, proxy_instance_t *proxi) -{ - proxy_instance_t *proxy = proxi->parent; - json_t *json_msg; - char *msg, *buf; - - /* Not set yet */ - if (!proxi->diff) - return; - - JSON_CPACK(json_msg, "{sIsisf}", - "proxy", proxy->id, - "subproxy", proxi->subid, - "diff", proxi->diff); - msg = json_dumps(json_msg, JSON_NO_UTF8); - json_decref(json_msg); - ASPRINTF(&buf, "diff=%s", msg); - free(msg); - send_proc(ckp->stratifier, buf); - free(buf); -} - -static void send_notify(ckpool_t *ckp, proxy_instance_t *proxi, notify_instance_t *ni) -{ - proxy_instance_t *proxy = proxi->parent; - json_t *json_msg, *merkle_arr; - char *msg, *buf; - int i; - - merkle_arr = json_array(); - - for (i = 0; i < ni->merkles; i++) - json_array_append_new(merkle_arr, json_string(&ni->merklehash[i][0])); - /* Use our own jobid instead of the server's one for easy lookup */ - JSON_CPACK(json_msg, "{sIsisisssisssssosssssssb}", - "proxy", proxy->id, "subproxy", proxi->subid, - "jobid", ni->id64, "prevhash", ni->prevhash, "coinb1len", ni->coinb1len, - "coinbase1", ni->coinbase1, "coinbase2", ni->coinbase2, - "merklehash", merkle_arr, "bbversion", ni->bbversion, - "nbit", ni->nbit, "ntime", ni->ntime, - "clean", ni->clean); - - msg = json_dumps(json_msg, JSON_NO_UTF8); - json_decref(json_msg); - ASPRINTF(&buf, "notify=%s", msg); - free(msg); - send_proc(ckp->stratifier, buf); - free(buf); - - /* Send diff now as stratifier will not accept diff till it has a - * valid workbase */ - send_diff(ckp, proxi); -} - -static bool parse_method(ckpool_t *ckp, proxy_instance_t *proxi, const char *msg) -{ - json_t *val = 
NULL, *method, *err_val, *params; - json_error_t err; - bool ret = false; - const char *buf; - - if (!msg) - goto out; - memset(&err, 0, sizeof(err)); - val = json_loads(msg, 0, &err); - if (!val) { - if (proxi->global) { - LOGWARNING("JSON decode of proxy %d:%s msg %s failed(%d): %s", - proxi->id, proxi->url, msg, err.line, err.text); - } else { - LOGNOTICE("JSON decode of proxy %d:%s msg %s failed(%d): %s", - proxi->id, proxi->url, msg, err.line, err.text); - } - goto out; - } - - method = json_object_get(val, "method"); - if (!method) { - /* Likely a share, look for harmless unhandled methods in - * pool response */ - if (strstr(msg, "mining.suggest")) { - LOGINFO("Unhandled suggest_diff from proxy %d:%s", proxi->id, proxi->url); - ret = true; - } else - LOGDEBUG("Failed to find method in json for parse_method"); - goto out; - } - err_val = json_object_get(val, "error"); - params = json_object_get(val, "params"); - - if (err_val && !json_is_null(err_val)) { - char *ss; - - if (err_val) - ss = json_dumps(err_val, 0); - else - ss = strdup("(unknown reason)"); - - LOGINFO("JSON-RPC method decode failed: %s", ss); - free(ss); - goto out; - } - - if (!json_is_string(method)) { - LOGINFO("Method is not string in parse_method"); - goto out; - } - buf = json_string_value(method); - if (!buf || strlen(buf) < 1) { - LOGINFO("Invalid string for method in parse_method"); - goto out; - } - - LOGDEBUG("Proxy %d:%d received method %s", proxi->id, proxi->subid, buf); - if (cmdmatch(buf, "mining.notify")) { - ret = parse_notify(ckp, proxi, params); - goto out; - } - - if (cmdmatch(buf, "mining.set_difficulty")) { - ret = parse_diff(proxi, params); - if (likely(ret)) - send_diff(ckp, proxi); - goto out; - } - - if (cmdmatch(buf, "client.reconnect")) { - ret = parse_reconnect(proxi, params); - goto out; - } - - if (cmdmatch(buf, "client.get_version")) { - ret = send_version(proxi, val); - goto out; - } - - if (cmdmatch(buf, "client.show_message")) { - ret = show_message(params); - 
goto out; - } - - if (cmdmatch(buf, "mining.ping")) { - ret = send_pong(proxi, val); - goto out; - } -out: - if (val) - json_decref(val); - return ret; -} - -/* cs semaphore must be held */ -static bool auth_stratum(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxi) -{ - json_t *val = NULL, *res_val, *req, *err_val; - char *buf = NULL; - bool ret; - - JSON_CPACK(req, "{s:i,s:s,s:[s,s]}", - "id", 42, - "method", "mining.authorize", - "params", proxi->auth, proxi->pass); - ret = send_json_msg(cs, req); - json_decref(req); - if (!ret) { - LOGNOTICE("Proxy %d:%d %s failed to send message in auth_stratum", - proxi->id, proxi->subid, proxi->url); - if (cs->fd > 0) { - epoll_ctl(proxi->epfd, EPOLL_CTL_DEL, cs->fd, NULL); - Close(cs->fd); - } - goto out; - } - - /* Read and parse any extra methods sent. Anything left in the buffer - * should be the response to our auth request. */ - do { - free(buf); - buf = next_proxy_line(cs, proxi); - if (!buf) { - LOGNOTICE("Proxy %d:%d %s failed to receive line in auth_stratum", - proxi->id, proxi->subid, proxi->url); - ret = false; - goto out; - } - ret = parse_method(ckp, proxi, buf); - } while (ret); - - val = json_msg_result(buf, &res_val, &err_val); - if (!val) { - if (proxi->global) { - LOGWARNING("Proxy %d:%d %s failed to get a json result in auth_stratum, got: %s", - proxi->id, proxi->subid, proxi->url, buf); - } else { - LOGNOTICE("Proxy %d:%d %s failed to get a json result in auth_stratum, got: %s", - proxi->id, proxi->subid, proxi->url, buf); - } - goto out; - } - - if (err_val && !json_is_null(err_val)) { - LOGWARNING("Proxy %d:%d %s failed to authorise in auth_stratum due to err_val, got: %s", - proxi->id, proxi->subid, proxi->url, buf); - goto out; - } - if (res_val) { - ret = json_is_true(res_val); - if (!ret) { - if (proxi->global) { - LOGWARNING("Proxy %d:%d %s failed to authorise in auth_stratum, got: %s", - proxi->id, proxi->subid, proxi->url, buf); - } else { - LOGNOTICE("Proxy %d:%d %s failed to authorise in 
auth_stratum, got: %s", - proxi->id, proxi->subid, proxi->url, buf); - } - goto out; - } - } else { - /* No result and no error but successful val means auth success */ - ret = true; - } - LOGINFO("Proxy %d:%d %s auth success in auth_stratum", proxi->id, proxi->subid, proxi->url); -out: - if (val) - json_decref(val); - if (ret) { - /* Now parse any cached responses so there are none in the - * queue and they can be managed one at a time from now on. */ - while(42) { - dealloc(buf); - buf = cached_proxy_line(proxi); - if (!buf) - break; - parse_method(ckp, proxi, buf); - }; - } - return ret; -} - -static proxy_instance_t *proxy_by_id(gdata_t *gdata, const int id) -{ - proxy_instance_t *proxi; - - mutex_lock(&gdata->lock); - HASH_FIND_INT(gdata->proxies, &id, proxi); - mutex_unlock(&gdata->lock); - - return proxi; -} - -static void send_subscribe(ckpool_t *ckp, proxy_instance_t *proxi) -{ - json_t *json_msg; - char *msg, *buf; - - JSON_CPACK(json_msg, "{ss,ss,ss,ss,sI,si,ss,si,sb,si}", - "baseurl", proxi->baseurl, - "url", proxi->url, "auth", proxi->auth, "pass", proxi->pass, - "proxy", proxi->id, "subproxy", proxi->subid, - "enonce1", proxi->enonce1, "nonce2len", proxi->nonce2len, - "global", proxi->global, "userid", proxi->userid); - msg = json_dumps(json_msg, JSON_NO_UTF8); - json_decref(json_msg); - ASPRINTF(&buf, "subscribe=%s", msg); - free(msg); - send_proc(ckp->stratifier, buf); - free(buf); -} - -static proxy_instance_t *subproxy_by_id(proxy_instance_t *proxy, const int subid) -{ - proxy_instance_t *subproxy; - - mutex_lock(&proxy->proxy_lock); - subproxy = __subproxy_by_id(proxy, subid); - mutex_unlock(&proxy->proxy_lock); - - return subproxy; -} - -static void drop_proxy(gdata_t *gdata, const char *buf) -{ - proxy_instance_t *proxy, *subproxy; - int id = -1, subid = -1; - - sscanf(buf, "dropproxy=%d:%d", &id, &subid); - if (unlikely(!subid)) { - LOGWARNING("Generator asked to drop parent proxy %d", id); - return; - } - proxy = proxy_by_id(gdata, id); - if 
(unlikely(!proxy)) { - LOGINFO("Generator asked to drop subproxy from non-existent parent %d", id); - return; - } - subproxy = subproxy_by_id(proxy, subid); - if (!subproxy) { - LOGINFO("Generator asked to drop non-existent subproxy %d:%d", id, subid); - return; - } - LOGNOTICE("Generator asked to drop proxy %d:%d", id, subid); - disable_subproxy(gdata, proxy, subproxy); -} - -static void stratifier_reconnect_client(ckpool_t *ckp, const int64_t id) -{ - char buf[256]; - - sprintf(buf, "reconnclient=%"PRId64, id); - send_proc(ckp->stratifier, buf); -} - -/* Add a share to the gdata share hashlist. Returns the share id */ -static int add_share(gdata_t *gdata, const int64_t client_id, const double diff) -{ - share_msg_t *share = ckzalloc(sizeof(share_msg_t)), *tmpshare; - time_t now; - int ret; - - share->submit_time = now = time(NULL); - share->client_id = client_id; - share->diff = diff; - - /* Add new share entry to the share hashtable. Age old shares */ - mutex_lock(&gdata->share_lock); - ret = share->id64 = gdata->share_id++; - HASH_ADD_I64(gdata->shares, id64, share); - HASH_ITER(hh, gdata->shares, share, tmpshare) { - if (share->submit_time < now - 120) { - HASH_DEL(gdata->shares, share); - free(share); - } - } - mutex_unlock(&gdata->share_lock); - - return ret; -} - -static void submit_share(gdata_t *gdata, json_t *val) -{ - proxy_instance_t *proxy, *proxi; - ckpool_t *ckp = gdata->ckp; - int id, subid, share_id; - bool success = false; - stratum_msg_t *msg; - int64_t client_id; - - /* Get the client id so we can tell the stratifier to drop it if the - * proxy it's bound to is not functional */ - if (unlikely(!json_get_int64(&client_id, val, "client_id"))) { - LOGWARNING("Got no client_id in share"); - goto out; - } - if (unlikely(!json_get_int(&id, val, "proxy"))) { - LOGWARNING("Got no proxy in share"); - goto out; - } - if (unlikely(!json_get_int(&subid, val, "subproxy"))) { - LOGWARNING("Got no subproxy in share"); - goto out; - } - proxy = 
proxy_by_id(gdata, id); - if (unlikely(!proxy)) { - LOGINFO("Client %"PRId64" sending shares to non existent proxy %d, dropping", - client_id, id); - stratifier_reconnect_client(ckp, client_id); - goto out; - } - proxi = subproxy_by_id(proxy, subid); - if (unlikely(!proxi)) { - LOGINFO("Client %"PRId64" sending shares to non existent subproxy %d:%d, dropping", - client_id, id, subid); - stratifier_reconnect_client(ckp, client_id); - goto out; - } - if (!proxi->alive) { - LOGINFO("Client %"PRId64" sending shares to dead subproxy %d:%d, dropping", - client_id, id, subid); - stratifier_reconnect_client(ckp, client_id); - goto out; - } - - success = true; - msg = ckzalloc(sizeof(stratum_msg_t)); - msg->json_msg = val; - share_id = add_share(gdata, client_id, proxi->diff); - json_set_int(val, "id", share_id); - - /* Add the new message to the psend list */ - mutex_lock(&gdata->psend_lock); - gdata->psends_generated++; - DL_APPEND(gdata->psends, msg); - pthread_cond_signal(&gdata->psend_cond); - mutex_unlock(&gdata->psend_lock); - -out: - if (!success) - json_decref(val); -} - -static void clear_notify(notify_instance_t *ni) -{ - if (ni->jobid) - json_decref(ni->jobid); - free(ni->coinbase1); - free(ni->coinbase2); - free(ni); -} - -/* Entered with proxy_lock held */ -static void __decay_proxy(proxy_instance_t *proxy, proxy_instance_t * parent, const double diff) -{ - double tdiff; - tv_t now_t; - - tv_time(&now_t); - tdiff = sane_tdiff(&now_t, &proxy->last_decay); - decay_time(&proxy->dsps1, diff, tdiff, MIN1); - decay_time(&proxy->dsps5, diff, tdiff, MIN5); - decay_time(&proxy->dsps60, diff, tdiff, HOUR); - decay_time(&proxy->dsps1440, diff, tdiff, DAY); - copy_tv(&proxy->last_decay, &now_t); - - tdiff = sane_tdiff(&now_t, &parent->total_last_decay); - decay_time(&parent->tdsps1, diff, tdiff, MIN1); - decay_time(&parent->tdsps5, diff, tdiff, MIN5); - decay_time(&parent->tdsps60, diff, tdiff, HOUR); - decay_time(&parent->tdsps1440, diff, tdiff, DAY); - 
copy_tv(&parent->total_last_decay, &now_t); -} - -static void account_shares(proxy_instance_t *proxy, const double diff, const bool result) -{ - proxy_instance_t *parent = proxy->parent; - - mutex_lock(&parent->proxy_lock); - if (result) { - proxy->diff_accepted += diff; - parent->total_accepted += diff; - __decay_proxy(proxy, parent, diff); - } else { - proxy->diff_rejected += diff; - parent->total_rejected += diff; - __decay_proxy(proxy, parent, 0); - } - mutex_unlock(&parent->proxy_lock); -} - -/* Returns zero if it is not recognised as a share, 1 if it is a valid share - * and -1 if it is recognised as a share but invalid. */ -static int parse_share(gdata_t *gdata, proxy_instance_t *proxi, const char *buf) -{ - json_t *val = NULL, *idval; - bool result = false; - share_msg_t *share; - int ret = 0; - int64_t id; - - val = json_loads(buf, 0, NULL); - if (unlikely(!val)) { - LOGINFO("Failed to parse upstream json msg: %s", buf); - goto out; - } - idval = json_object_get(val, "id"); - if (unlikely(!idval)) { - LOGINFO("Failed to find id in upstream json msg: %s", buf); - goto out; - } - id = json_integer_value(idval); - if (unlikely(!json_get_bool(&result, val, "result"))) { - LOGINFO("Failed to find result in upstream json msg: %s", buf); - goto out; - } - - mutex_lock(&gdata->share_lock); - HASH_FIND_I64(gdata->shares, &id, share); - if (share) { - HASH_DEL(gdata->shares, share); - free(share); - } - mutex_unlock(&gdata->share_lock); - - if (!share) { - LOGINFO("Proxy %d:%d failed to find matching share to result: %s", - proxi->id, proxi->subid, buf); - /* We don't know what diff these shares are so assume the - * current proxy diff. 
*/ - account_shares(proxi, proxi->diff, result); - ret = -1; - goto out; - } - ret = 1; - account_shares(proxi, share->diff, result); - LOGINFO("Proxy %d:%d share result %s from client %"PRId64, proxi->id, proxi->subid, - buf, share->client_id); - free(share); -out: - if (val) - json_decref(val); - return ret; -} - -struct cs_msg { - cs_msg_t *next; - cs_msg_t *prev; - proxy_instance_t *proxy; - char *buf; - int len; - int ofs; -}; - -/* Sends all messages in the queue ready to be dispatched, leaving those that - * would block to be handled next pass */ -static void send_json_msgq(gdata_t *gdata, cs_msg_t **csmsgq) -{ - cs_msg_t *csmsg, *tmp; - int ret; - - DL_FOREACH_SAFE(*csmsgq, csmsg, tmp) { - proxy_instance_t *proxy = csmsg->proxy; - - /* Only try to send one message at a time to each proxy - * to avoid sending parts of different messages */ - if (proxy->sending && proxy->sending != csmsg) - continue; - while (csmsg->len > 0) { - int fd; - - if (unlikely(!proxy->alive)) { - LOGDEBUG("Dropping send message to dead proxy %d:%d in send_json_msgq", - proxy->id, proxy->subid); - csmsg->len = 0; - break; - } - proxy->sending = csmsg; - fd = proxy->cs.fd; - ret = send(fd, csmsg->buf + csmsg->ofs, csmsg->len, MSG_DONTWAIT); - if (ret < 1) { - if (!ret) - break; - ret = 0; - if (errno == EAGAIN || errno == EWOULDBLOCK) - break; - csmsg->len = 0; - LOGNOTICE("Proxy %d:%d %s failed to send msg in send_json_msgq, dropping", - proxy->id, proxy->subid, proxy->url); - disable_subproxy(gdata, proxy->parent, proxy); - } - csmsg->ofs += ret; - csmsg->len -= ret; - } - if (csmsg->len < 1) { - proxy->sending = NULL; - DL_DELETE(*csmsgq, csmsg); - free(csmsg->buf); - free(csmsg); - } - } -} - -static void add_json_msgq(cs_msg_t **csmsgq, proxy_instance_t *proxy, json_t **val) -{ - cs_msg_t *csmsg = ckzalloc(sizeof(cs_msg_t)); - - csmsg->buf = json_dumps(*val, JSON_ESCAPE_SLASH | JSON_EOL); - json_decref(*val); - *val = NULL; - if (unlikely(!csmsg->buf)) { - LOGWARNING("Failed to 
create json dump in add_json_msgq"); - return; - } - csmsg->len = strlen(csmsg->buf); - csmsg->proxy = proxy; - DL_APPEND(*csmsgq, csmsg); -} - -/* For processing and sending shares. proxy refers to parent proxy here */ -static void *proxy_send(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - gdata_t *gdata = ckp->gdata; - stratum_msg_t *msg = NULL; - cs_msg_t *csmsgq = NULL; - - rename_proc("proxysend"); - - pthread_detach(pthread_self()); - - while (42) { - proxy_instance_t *proxy, *subproxy; - int proxyid = 0, subid = 0; - int64_t client_id = 0, id; - notify_instance_t *ni; - json_t *jobid = NULL; - json_t *val; - - if (unlikely(msg)) { - json_decref(msg->json_msg); - free(msg); - } - - mutex_lock(&gdata->psend_lock); - if (!gdata->psends) { - /* Poll every 10ms */ - const ts_t polltime = {0, 10000000}; - ts_t timeout_ts; - - ts_realtime(&timeout_ts); - timeraddspec(&timeout_ts, &polltime); - cond_timedwait(&gdata->psend_cond, &gdata->psend_lock, &timeout_ts); - } - msg = gdata->psends; - if (likely(msg)) - DL_DELETE(gdata->psends, msg); - mutex_unlock(&gdata->psend_lock); - - if (!msg) { - send_json_msgq(gdata, &csmsgq); - continue; - } - - if (unlikely(!json_get_int(&subid, msg->json_msg, "subproxy"))) { - LOGWARNING("Failed to find subproxy in proxy_send msg"); - continue; - } - if (unlikely(!json_get_int64(&id, msg->json_msg, "jobid"))) { - LOGWARNING("Failed to find jobid in proxy_send msg"); - continue; - } - if (unlikely(!json_get_int(&proxyid, msg->json_msg, "proxy"))) { - LOGWARNING("Failed to find proxy in proxy_send msg"); - continue; - } - if (unlikely(!json_get_int64(&client_id, msg->json_msg, "client_id"))) { - LOGWARNING("Failed to find client_id in proxy_send msg"); - continue; - } - proxy = proxy_by_id(gdata, proxyid); - if (unlikely(!proxy)) { - LOGWARNING("Proxysend for got message for non-existent proxy %d", - proxyid); - continue; - } - subproxy = subproxy_by_id(proxy, subid); - if (unlikely(!subproxy)) { - LOGWARNING("Proxysend for got 
message for non-existent subproxy %d:%d", - proxyid, subid); - continue; - } - - mutex_lock(&gdata->notify_lock); - HASH_FIND_INT(gdata->notify_instances, &id, ni); - if (ni) - jobid = json_copy(ni->jobid); - mutex_unlock(&gdata->notify_lock); - - if (unlikely(!jobid)) { - stratifier_reconnect_client(ckp, client_id); - LOGNOTICE("Proxy %d:%s failed to find matching jobid in proxysend", - subproxy->id, subproxy->url); - continue; - } - - JSON_CPACK(val, "{s[soooo]soss}", "params", subproxy->auth, jobid, - json_object_dup(msg->json_msg, "nonce2"), - json_object_dup(msg->json_msg, "ntime"), - json_object_dup(msg->json_msg, "nonce"), - "id", json_object_dup(msg->json_msg, "id"), - "method", "mining.submit"); - add_json_msgq(&csmsgq, subproxy, &val); - send_json_msgq(gdata, &csmsgq); - } - return NULL; -} - -static void passthrough_send(ckpool_t *ckp, pass_msg_t *pm) -{ - proxy_instance_t *proxy = pm->proxy; - connsock_t *cs = pm->cs; - int len, sent; - - if (unlikely(!proxy->alive || cs->fd < 0)) { - LOGDEBUG("Dropping send to dead proxy of upstream json msg: %s", pm->msg); - goto out; - } - LOGDEBUG("Sending upstream json msg: %s", pm->msg); - len = strlen(pm->msg); - sent = write_socket(cs->fd, pm->msg, len); - if (unlikely(sent != len)) { - LOGWARNING("Failed to passthrough %d bytes of message %s, attempting reconnect", - len, pm->msg); - Close(cs->fd); - proxy->alive = false; - reconnect_generator(ckp); - } -out: - free(pm->msg); - free(pm); -} - -static void passthrough_add_send(proxy_instance_t *proxy, char *msg) -{ - pass_msg_t *pm = ckzalloc(sizeof(pass_msg_t)); - - pm->proxy = proxy; - pm->cs = &proxy->cs; - pm->msg = msg; - ckmsgq_add(proxy->passsends, pm); -} - -void generator_add_send(ckpool_t *ckp, json_t *val) -{ - gdata_t *gdata = ckp->gdata; - char *buf; - - if (!ckp->passthrough) { - submit_share(gdata, val); - return; - } - if (unlikely(!gdata->current_proxy)) { - LOGWARNING("No current proxy to send passthrough data to"); - goto out; - } - buf = 
json_dumps(val, JSON_COMPACT | JSON_EOL); - if (unlikely(!buf)) { - LOGWARNING("Unable to decode json in generator_add_send"); - goto out; - } - passthrough_add_send(gdata->current_proxy, buf); -out: - json_decref(val); -} - -static void suggest_diff(ckpool_t *ckp, connsock_t *cs, proxy_instance_t *proxy) -{ - json_t *req; - bool ret; - - JSON_CPACK(req, "{s:i,s:s, s:[I]}", - "id", 41, - "method", "mining.suggest", - "params", ckp->mindiff); - ret = send_json_msg(cs, req); - json_decref(req); - if (!ret) { - LOGNOTICE("Proxy %d:%d %s failed to send message in suggest_diff", - proxy->id, proxy->subid, proxy->url); - if (cs->fd > 0) { - epoll_ctl(proxy->epfd, EPOLL_CTL_DEL, cs->fd, NULL); - Close(cs->fd); - } - } - /* We don't care about the response here. It can get filtered out later - * if it fails upstream. */ -} - -/* Upon failing connnect, subscribe, or auth, back off on the next attempt. - * This function should be called on the parent proxy */ -static void proxy_backoff(proxy_instance_t *proxy) -{ - /* Add 5 seconds with each backoff, up to maximum of 1 minute */ - if (proxy->backoff < 60) - proxy->backoff += 5; -} - -static bool proxy_alive(ckpool_t *ckp, proxy_instance_t *proxi, connsock_t *cs, - bool pinging) -{ - proxy_instance_t *parent = proxi->parent; - bool ret = false; - - /* Has this proxy already been reconnected? 
*/ - if (proxi->alive) - return true; - if (proxi->disabled) - return false; - - /* Serialise all send/recvs here with the cs semaphore */ - cksem_wait(&cs->sem); - /* Check again after grabbing semaphore */ - if (unlikely(proxi->alive)) { - ret = true; - goto out; - } - if (!extract_sockaddr(proxi->url, &cs->url, &cs->port)) { - LOGWARNING("Failed to extract address from %s", proxi->url); - goto out; - } - if (!connect_proxy(ckp, cs, proxi)) { - if (!pinging) { - LOGINFO("Failed to connect to %s:%s in proxy_mode!", - cs->url, cs->port); - } - parent->connect_status = STATUS_FAIL; - proxy_backoff(parent); - goto out; - } - parent->connect_status = STATUS_SUCCESS; - - if (ckp->node) { - if (!node_stratum(cs, proxi)) { - LOGWARNING("Failed initial node setup to %s:%s !", - cs->url, cs->port); - goto out; - } - ret = true; - goto out; - } - if (ckp->passthrough) { - if (!passthrough_stratum(cs, proxi)) { - LOGWARNING("Failed initial passthrough to %s:%s !", - cs->url, cs->port); - goto out; - } - ret = true; - goto out; - } - /* Test we can connect, authorise and get stratum information */ - if (!subscribe_stratum(ckp, cs, proxi)) { - if (!pinging) { - LOGWARNING("Failed initial subscribe to %s:%s !", - cs->url, cs->port); - } - parent->subscribe_status = STATUS_FAIL; - proxy_backoff(parent); - goto out; - } - parent->subscribe_status = STATUS_SUCCESS; - - if (!ckp->passthrough) - send_subscribe(ckp, proxi); - if (!auth_stratum(ckp, cs, proxi)) { - if (!pinging) { - LOGWARNING("Failed initial authorise to %s:%s with %s:%s !", - cs->url, cs->port, proxi->auth, proxi->pass); - } - parent->auth_status = STATUS_FAIL; - proxy_backoff(parent); - goto out; - } - parent->auth_status = STATUS_SUCCESS; - proxi->authorised = ret = true; - parent->backoff = 0; - if (ckp->mindiff > 1) - suggest_diff(ckp, cs, proxi); -out: - if (!ret) { - send_stratifier_deadproxy(ckp, proxi->id, proxi->subid); - /* Close and invalidate the file handle */ - Close(cs->fd); - } - proxi->alive = ret; 
- cksem_post(&cs->sem); - - /* Decrease the parent's recruit count after sending the stratifier the - * new subscribe so it can get an accurate headroom count before - * requesting more proxies. */ - if (ret) { - proxy_instance_t *parent = proxi->parent; - - if (parent) { - mutex_lock(&parent->proxy_lock); - parent->recruit -= proxi->clients_per_proxy; - if (parent->recruit < 0) - parent->recruit = 0; - mutex_unlock(&parent->proxy_lock); - } - } - - return ret; -} - -static void *proxy_recruit(void *arg) -{ - proxy_instance_t *proxy, *parent = (proxy_instance_t *)arg; - ckpool_t *ckp = parent->ckp; - gdata_t *gdata = ckp->gdata; - bool recruit, alive; - - pthread_detach(pthread_self()); - - /* We do this in a separate thread so it's okay to sleep here */ - if (parent->backoff) - sleep(parent->backoff); - -retry: - recruit = false; - proxy = create_subproxy(ckp, gdata, parent, parent->url, parent->baseurl); - alive = proxy_alive(ckp, proxy, &proxy->cs, false); - if (!alive) { - LOGNOTICE("Subproxy failed proxy_alive testing"); - store_proxy(gdata, proxy); - } else - add_subproxy(parent, proxy); - - mutex_lock(&parent->proxy_lock); - if (alive && parent->recruit > 0) - recruit = true; - else /* Reset so the next request will try again */ - parent->recruit = 0; - mutex_unlock(&parent->proxy_lock); - - if (recruit) - goto retry; - - return NULL; -} - -static void recruit_subproxies(proxy_instance_t *proxi, const int recruits) -{ - bool recruit = false; - pthread_t pth; - - mutex_lock(&proxi->proxy_lock); - if (!proxi->recruit) - recruit = true; - if (proxi->recruit < recruits) - proxi->recruit = recruits; - mutex_unlock(&proxi->proxy_lock); - - if (recruit) - create_pthread(&pth, proxy_recruit, proxi); -} - -/* Queue up to the requested amount */ -static void recruit_subproxy(gdata_t *gdata, const char *buf) -{ - int recruits = 1, id = 0; - proxy_instance_t *proxy; - - sscanf(buf, "recruit=%d:%d", &id, &recruits); - proxy = proxy_by_id(gdata, id); - if 
(unlikely(!proxy)) { - LOGNOTICE("Generator failed to find proxy id %d to recruit subproxies", - id); - return; - } - recruit_subproxies(proxy, recruits); -} - -static void *proxy_reconnect(void *arg) -{ - proxy_instance_t *proxy = (proxy_instance_t *)arg; - connsock_t *cs = &proxy->cs; - ckpool_t *ckp = proxy->ckp; - - pthread_detach(pthread_self()); - if (proxy->parent->backoff) - sleep(proxy->parent->backoff); - proxy_alive(ckp, proxy, cs, true); - proxy->reconnecting = false; - return NULL; -} - -/* For reconnecting the parent proxy instance async */ -static void reconnect_proxy(proxy_instance_t *proxi) -{ - pthread_t pth; - - if (proxi->reconnecting) - return; - proxi->reconnecting = true; - create_pthread(&pth, proxy_reconnect, proxi); -} - -/* For receiving messages from an upstream pool to pass downstream. Responsible - * for setting up the connection and testing pool is live. */ -static void *passthrough_recv(void *arg) -{ - proxy_instance_t *proxi = (proxy_instance_t *)arg; - connsock_t *cs = &proxi->cs; - ckpool_t *ckp = proxi->ckp; - bool alive; - - rename_proc("passrecv"); - - proxi->parent = proxi; - if (proxy_alive(ckp, proxi, cs, false)) - LOGWARNING("Passthrough proxy %d:%s connection established", proxi->id, proxi->url); - alive = proxi->alive; - - while (42) { - float timeout = 5; - int ret; - - while (!proxy_alive(ckp, proxi, cs, true)) { - alive = false; - sleep(5); - } - if (!alive) { - reconnect_generator(ckp); - LOGWARNING("Passthrough %d:%s recovered", proxi->id, proxi->url); - alive = true; - } - - cksem_wait(&cs->sem); - ret = read_socket_line(cs, &timeout); - /* Simply forward the message on, as is, to the connector to - * process. 
Possibly parse parameters sent by upstream pool - * here */ - if (likely(ret > 0)) { - LOGDEBUG("Passthrough recv received upstream msg: %s", cs->buf); - send_proc(ckp->connector, cs->buf); - } else if (ret < 0) { - /* Read failure */ - LOGWARNING("Passthrough %d:%s failed to read_socket_line in passthrough_recv, attempting reconnect", - proxi->id, proxi->url); - alive = proxi->alive = false; - Close(cs->fd); - reconnect_generator(ckp); - } else /* No messages during timeout */ - LOGDEBUG("Passthrough %d:%s no messages received", proxi->id, proxi->url); - cksem_post(&cs->sem); - } - return NULL; -} - -static bool subproxies_alive(proxy_instance_t *proxy) -{ - proxy_instance_t *subproxy, *tmp; - bool ret = false; - - mutex_lock(&proxy->proxy_lock); - HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { - if (subproxy->alive) { - ret = true; - break; - } - } - mutex_unlock(&proxy->proxy_lock); - - return ret; -} - -/* For receiving messages from the upstream proxy, also responsible for setting - * up the connection and testing it's alive. 
*/ -static void *proxy_recv(void *arg) -{ - proxy_instance_t *proxi = (proxy_instance_t *)arg; - connsock_t *cs = &proxi->cs; - proxy_instance_t *subproxy; - ckpool_t *ckp = proxi->ckp; - gdata_t *gdata = ckp->gdata; - struct epoll_event event; - bool alive; - int epfd; - - rename_proc("proxyrecv"); - pthread_detach(pthread_self()); - - proxi->epfd = epfd = epoll_create1(EPOLL_CLOEXEC); - if (epfd < 0){ - LOGEMERG("FATAL: Failed to create epoll in proxyrecv"); - return NULL; - } - - if (proxy_alive(ckp, proxi, cs, false)) - LOGWARNING("Proxy %d:%s connection established", proxi->id, proxi->url); - - alive = proxi->alive; - - while (42) { - bool message = false, hup = false; - share_msg_t *share, *tmpshare; - notify_instance_t *ni, *tmp; - float timeout; - time_t now; - int ret; - - subproxy = proxi; - if (!proxi->alive) { - reconnect_proxy(proxi); - while (!subproxies_alive(proxi)) { - reconnect_proxy(proxi); - if (alive) { - reconnect_generator(ckp); - LOGWARNING("Proxy %d:%s failed, attempting reconnect", - proxi->id, proxi->url); - alive = false; - } - sleep(5); - } - } - if (!alive) { - reconnect_generator(ckp); - LOGWARNING("Proxy %d:%s recovered", proxi->id, proxi->url); - alive = true; - } - - now = time(NULL); - - /* Age old notifications older than 10 mins old */ - mutex_lock(&gdata->notify_lock); - HASH_ITER(hh, gdata->notify_instances, ni, tmp) { - if (HASH_COUNT(gdata->notify_instances) < 3) - break; - if (ni->notify_time < now - 600) { - HASH_DEL(gdata->notify_instances, ni); - clear_notify(ni); - } - } - mutex_unlock(&gdata->notify_lock); - - /* Similary with shares older than 2 mins without response */ - mutex_lock(&gdata->share_lock); - HASH_ITER(hh, gdata->shares, share, tmpshare) { - if (share->submit_time < now - 120) { - HASH_DEL(gdata->shares, share); - free(share); - } - } - mutex_unlock(&gdata->share_lock); - - cs = NULL; - /* If we don't get an update within 10 minutes the upstream pool - * has likely stopped responding. 
*/ - ret = epoll_wait(epfd, &event, 1, 600000); - if (likely(ret > 0)) { - subproxy = event.data.ptr; - cs = &subproxy->cs; - if (!subproxy->alive) { - cs = NULL; - continue; - } - - /* Serialise messages from here once we have a cs by - * holding the semaphore. */ - cksem_wait(&cs->sem); - /* Process any messages before checking for errors in - * case a message is sent and then the socket - * immediately closed. - */ - if (event.events & EPOLLIN) { - timeout = 30; - ret = read_socket_line(cs, &timeout); - /* If we are unable to read anything within 30 - * seconds at this point after EPOLLIN is set - * then the socket is dead. */ - if (ret < 1) { - LOGNOTICE("Proxy %d:%d %s failed to read_socket_line in proxy_recv", - proxi->id, subproxy->subid, subproxy->url); - hup = true; - } else { - message = true; - timeout = 0; - } - } - if (event.events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP)) { - LOGNOTICE("Proxy %d:%d %s epoll hangup in proxy_recv", - proxi->id, subproxy->subid, subproxy->url); - hup = true; - } - } else { - LOGNOTICE("Proxy %d:%d %s failed to epoll in proxy_recv", - proxi->id, subproxy->subid, subproxy->url); - hup = true; - } - - /* Parse any other messages already fully buffered with a zero - * timeout. 
*/ - while (message || read_socket_line(cs, &timeout) > 0) { - message = false; - timeout = 0; - /* subproxy may have been recycled here if it is not a - * parent and reconnect was issued */ - if (parse_method(ckp, subproxy, cs->buf)) - continue; - /* If it's not a method it should be a share result */ - if (!parse_share(gdata, subproxy, cs->buf)) { - LOGNOTICE("Proxy %d:%d unhandled stratum message: %s", - subproxy->id, subproxy->subid, cs->buf); - } - } - - /* Process hangup only after parsing messages */ - if (hup) - disable_subproxy(gdata, proxi, subproxy); - if (cs) - cksem_post(&cs->sem); - } - - return NULL; -} - -/* Thread that handles all received messages from user proxies */ -static void *userproxy_recv(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - gdata_t *gdata = ckp->gdata; - struct epoll_event event; - int epfd; - - rename_proc("uproxyrecv"); - pthread_detach(pthread_self()); - - epfd = epoll_create1(EPOLL_CLOEXEC); - if (epfd < 0){ - LOGEMERG("FATAL: Failed to create epoll in userproxy_recv"); - return NULL; - } - - while (42) { - proxy_instance_t *proxy, *tmpproxy; - bool message = false, hup = false; - share_msg_t *share, *tmpshare; - notify_instance_t *ni, *tmp; - connsock_t *cs; - float timeout; - time_t now; - int ret; - - mutex_lock(&gdata->lock); - HASH_ITER(hh, gdata->proxies, proxy, tmpproxy) { - if (!proxy->global && !proxy->alive) { - proxy->epfd = epfd; - reconnect_proxy(proxy); - } - } - mutex_unlock(&gdata->lock); - - ret = epoll_wait(epfd, &event, 1, 1000); - if (ret < 1) { - if (likely(!ret)) - continue; - LOGEMERG("Failed to epoll_wait in userproxy_recv"); - break; - } - proxy = event.data.ptr; - /* Make sure we haven't popped this off before we've finished - * subscribe/auth */ - if (unlikely(!proxy->authorised)) - continue; - - now = time(NULL); - - mutex_lock(&gdata->notify_lock); - HASH_ITER(hh, gdata->notify_instances, ni, tmp) { - if (HASH_COUNT(gdata->notify_instances) < 3) - break; - if (ni->notify_time < now - 600) { - 
HASH_DEL(gdata->notify_instances, ni); - clear_notify(ni); - } - } - mutex_unlock(&gdata->notify_lock); - - /* Similary with shares older than 2 mins without response */ - mutex_lock(&gdata->share_lock); - HASH_ITER(hh, gdata->shares, share, tmpshare) { - if (share->submit_time < now - 120) { - HASH_DEL(gdata->shares, share); - free(share); - } - } - mutex_unlock(&gdata->share_lock); - - cs = &proxy->cs; - -#if 0 - /* Is this needed at all? */ - if (!proxy->alive) - continue; -#endif - - if ((event.events & (EPOLLHUP | EPOLLERR | EPOLLRDHUP))) { - LOGNOTICE("Proxy %d:%d %s hangup in userproxy_recv", proxy->id, - proxy->subid, proxy->url); - hup = true; - } - - if (likely(event.events & EPOLLIN)) { - timeout = 30; - - cksem_wait(&cs->sem); - ret = read_socket_line(cs, &timeout); - /* If we are unable to read anything within 30 - * seconds at this point after EPOLLIN is set - * then the socket is dead. */ - if (ret < 1) { - LOGNOTICE("Proxy %d:%d %s failed to read_socket_line in userproxy_recv", - proxy->id, proxy->subid, proxy->url); - hup = true; - } else { - message = true; - timeout = 0; - } - while (message || (ret = read_socket_line(cs, &timeout)) > 0) { - message = false; - timeout = 0; - /* proxy may have been recycled here if it is not a - * parent and reconnect was issued */ - if (parse_method(ckp, proxy, cs->buf)) - continue; - /* If it's not a method it should be a share result */ - if (!parse_share(gdata, proxy, cs->buf)) { - LOGNOTICE("Proxy %d:%d unhandled stratum message: %s", - proxy->id, proxy->subid, cs->buf); - } - } - cksem_post(&cs->sem); - } - - if (hup) { - disable_subproxy(gdata, proxy->parent, proxy); - continue; - } - } - return NULL; -} - -static void prepare_proxy(proxy_instance_t *proxi) -{ - proxi->parent = proxi; - mutex_init(&proxi->proxy_lock); - add_subproxy(proxi, proxi); - if (proxi->global) - create_pthread(&proxi->pth_precv, proxy_recv, proxi); -} - -static proxy_instance_t *wait_best_proxy(ckpool_t *ckp, gdata_t *gdata) -{ - 
proxy_instance_t *ret = NULL, *proxi, *tmp; - int retries = 0; - - while (42) { - mutex_lock(&gdata->lock); - HASH_ITER(hh, gdata->proxies, proxi, tmp) { - if (proxi->disabled || !proxi->global) - continue; - if (proxi->alive || subproxies_alive(proxi)) { - if (!ret || proxi->id < ret->id) - ret = proxi; - } - } - mutex_unlock(&gdata->lock); - - if (ret) - break; - /* Send reject message if we are unable to find an active - * proxy for more than 5 seconds */ - if (!((retries++) % 5)) - send_proc(ckp->connector, "reject"); - sleep(1); - } - send_proc(ckp->connector, ret ? "accept" : "reject"); - return ret; -} - -static void send_list(gdata_t *gdata, const int sockd) -{ - proxy_instance_t *proxy, *tmp; - json_t *val, *array_val; - - array_val = json_array(); - - mutex_lock(&gdata->lock); - HASH_ITER(hh, gdata->proxies, proxy, tmp) { - JSON_CPACK(val, "{si,sb,si,ss,ss,sf,sb,sb,si}", - "id", proxy->id, "global", proxy->global, "userid", proxy->userid, - "auth", proxy->auth, "pass", proxy->pass, - "diff", proxy->diff, - "disabled", proxy->disabled, "alive", proxy->alive, - "subproxies", proxy->subproxy_count); - if (proxy->enonce1) { - json_set_string(val, "enonce1", proxy->enonce1); - json_set_int(val, "nonce1len", proxy->nonce1len); - json_set_int(val, "nonce2len", proxy->nonce2len); - } - json_array_append_new(array_val, val); - } - mutex_unlock(&gdata->lock); - - JSON_CPACK(val, "{so}", "proxies", array_val); - send_api_response(val, sockd); -} - -static void send_sublist(gdata_t *gdata, const int sockd, const char *buf) -{ - proxy_instance_t *proxy, *subproxy, *tmp; - json_t *val = NULL, *res = NULL, *array_val; - json_error_t err_val; - int64_t id; - - array_val = json_array(); - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (unlikely(!json_get_int64(&id, val, "id"))) { - res = json_errormsg("Failed to get ID in send_sublist JSON: %s", buf); - goto out; - } - proxy = proxy_by_id(gdata, 
id); - if (unlikely(!proxy)) { - res = json_errormsg("Failed to find proxy %"PRId64" in send_sublist", id); - goto out; - } - - mutex_lock(&gdata->lock); - HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { - JSON_CPACK(res, "{si,ss,ss,sf,sb,sb}", - "subid", subproxy->id, - "auth", subproxy->auth, "pass", subproxy->pass, - "diff", subproxy->diff, - "disabled", subproxy->disabled, "alive", subproxy->alive); - if (subproxy->enonce1) { - json_set_string(res, "enonce1", subproxy->enonce1); - json_set_int(res, "nonce1len", subproxy->nonce1len); - json_set_int(res, "nonce2len", subproxy->nonce2len); - } - json_array_append_new(array_val, res); - } - mutex_unlock(&gdata->lock); - - JSON_CPACK(res, "{so}", "subproxies", array_val); -out: - if (val) - json_decref(val); - send_api_response(res, sockd); -} - -static proxy_instance_t *__add_proxy(ckpool_t *ckp, gdata_t *gdata, const int num); - -static proxy_instance_t *__add_userproxy(ckpool_t *ckp, gdata_t *gdata, const int id, - const int userid, char *url, char *auth, char *pass) -{ - proxy_instance_t *proxy; - - gdata->proxies_generated++; - proxy = ckzalloc(sizeof(proxy_instance_t)); - proxy->id = id; - proxy->userid = userid; - proxy->url = url; - proxy->baseurl = strdup(url); - proxy->auth = auth; - proxy->pass = pass; - proxy->ckp = proxy->cs.ckp = ckp; - cksem_init(&proxy->cs.sem); - cksem_post(&proxy->cs.sem); - HASH_ADD_INT(gdata->proxies, id, proxy); - return proxy; -} - -static void add_userproxy(ckpool_t *ckp, gdata_t *gdata, const int userid, - const char *url, const char *auth, const char *pass) -{ - proxy_instance_t *proxy; - char *newurl = strdup(url); - char *newauth = strdup(auth); - char *newpass = strdup(pass ? 
pass : ""); - int id; - - mutex_lock(&gdata->lock); - id = ckp->proxies++; - proxy = __add_userproxy(ckp, gdata, id, userid, newurl, newauth, newpass); - mutex_unlock(&gdata->lock); - - LOGWARNING("Adding non global user %s, %d proxy %d:%s", auth, userid, id, url); - prepare_proxy(proxy); -} - -static void parse_addproxy(ckpool_t *ckp, gdata_t *gdata, const int sockd, const char *buf) -{ - char *url = NULL, *auth = NULL, *pass = NULL; - json_t *val = NULL, *res = NULL; - proxy_instance_t *proxy; - json_error_t err_val; - int id, userid; - bool global; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - json_get_string(&url, val, "url"); - json_get_string(&auth, val, "auth"); - json_get_string(&pass, val, "pass"); - if (json_get_int(&userid, val, "userid")) - global = false; - else - global = true; - if (unlikely(!url || !auth || !pass)) { - res = json_errormsg("Failed to decode url/auth/pass in addproxy %s", buf); - goto out; - } - - mutex_lock(&gdata->lock); - id = ckp->proxies++; - if (global) { - ckp->proxyurl = realloc(ckp->proxyurl, sizeof(char **) * ckp->proxies); - ckp->proxyauth = realloc(ckp->proxyauth, sizeof(char **) * ckp->proxies); - ckp->proxypass = realloc(ckp->proxypass, sizeof(char **) * ckp->proxies); - ckp->proxyurl[id] = url; - ckp->proxyauth[id] = auth; - ckp->proxypass[id] = pass; - proxy = __add_proxy(ckp, gdata, id); - } else - proxy = __add_userproxy(ckp, gdata, id, userid, url, auth, pass); - mutex_unlock(&gdata->lock); - - if (global) - LOGNOTICE("Adding global proxy %d:%s", id, proxy->url); - else - LOGNOTICE("Adding user %d proxy %d:%s", userid, id, proxy->url); - prepare_proxy(proxy); - if (global) { - JSON_CPACK(res, "{si,ss,ss,ss}", - "id", proxy->id, "url", url, "auth", auth, "pass", pass); - } else { - JSON_CPACK(res, "{si,ss,ss,ss,si}", - "id", proxy->id, "url", url, "auth", auth, "pass", pass, - "userid", proxy->userid); - } -out: - if (val) - json_decref(val); 
- send_api_response(res, sockd); -} - -static void delete_proxy(ckpool_t *ckp, gdata_t *gdata, proxy_instance_t *proxy) -{ - proxy_instance_t *subproxy; - - /* Remove the proxy from the master list first */ - mutex_lock(&gdata->lock); - HASH_DEL(gdata->proxies, proxy); - /* Disable all its threads */ - pthread_cancel(proxy->pth_precv); - close_proxy_socket(proxy, proxy); - mutex_unlock(&gdata->lock); - - /* Recycle all its subproxies */ - do { - mutex_lock(&proxy->proxy_lock); - subproxy = proxy->subproxies; - if (subproxy) - HASH_DELETE(sh, proxy->subproxies, subproxy); - mutex_unlock(&proxy->proxy_lock); - - if (subproxy) { - close_proxy_socket(proxy, subproxy); - send_stratifier_delproxy(ckp, subproxy->id, subproxy->subid); - if (proxy != subproxy) - store_proxy(gdata, subproxy); - } - } while (subproxy); - - /* Recycle the proxy itself */ - store_proxy(gdata, proxy); -} - -static void parse_delproxy(ckpool_t *ckp, gdata_t *gdata, const int sockd, const char *buf) -{ - json_t *val = NULL, *res = NULL; - proxy_instance_t *proxy; - json_error_t err_val; - int id = -1; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - json_get_int(&id, val, "id"); - proxy = proxy_by_id(gdata, id); - if (!proxy) { - res = json_errormsg("Proxy id %d not found", id); - goto out; - } - JSON_CPACK(res, "{si,ss,ss,ss,ss}", "id", proxy->id, "url", proxy->url, - "baseurl", proxy->baseurl,"auth", proxy->auth, "pass", proxy->pass); - - LOGNOTICE("Deleting proxy %d:%s", proxy->id, proxy->url); - delete_proxy(ckp, gdata, proxy); -out: - if (val) - json_decref(val); - send_api_response(res, sockd); -} - -static void parse_ableproxy(gdata_t *gdata, const int sockd, const char *buf, bool disable) -{ - json_t *val = NULL, *res = NULL; - proxy_instance_t *proxy; - json_error_t err_val; - int id = -1; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - 
json_get_int(&id, val, "id"); - proxy = proxy_by_id(gdata, id); - if (!proxy) { - res = json_errormsg("Proxy id %d not found", id); - goto out; - } - JSON_CPACK(res, "{si,ss, ss,ss,ss}", "id", proxy->id, "url", proxy->url, - "baseurl", proxy->baseurl,"auth", proxy->auth, "pass", proxy->pass); - if (proxy->disabled != disable) { - proxy->disabled = disable; - LOGNOTICE("%sabling proxy %d:%s", disable ? "Dis" : "En", id, proxy->url); - } - if (disable) { - /* Set disabled bool here in case this is a parent proxy */ - proxy->disabled = true; - disable_subproxy(gdata, proxy, proxy); - } else - reconnect_proxy(proxy); -out: - if (val) - json_decref(val); - send_api_response(res, sockd); -} - -static void send_stats(gdata_t *gdata, const int sockd) -{ - json_t *val = json_object(), *subval; - int total_objects, objects; - int64_t generated, memsize; - proxy_instance_t *proxy; - stratum_msg_t *msg; - - mutex_lock(&gdata->lock); - objects = HASH_COUNT(gdata->proxies); - memsize = SAFE_HASH_OVERHEAD(gdata->proxies) + sizeof(proxy_instance_t) * objects; - generated = gdata->proxies_generated; - JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "proxies", subval); - - DL_COUNT(gdata->dead_proxies, proxy, objects); - memsize = sizeof(proxy_instance_t) * objects; - JSON_CPACK(subval, "{si,sI}", "count", objects, "memory", memsize); - json_set_object(val, "dead_proxies", subval); - - total_objects = memsize = 0; - for (proxy = gdata->proxies; proxy; proxy=proxy->hh.next) { - mutex_lock(&proxy->proxy_lock); - total_objects += objects = HASH_COUNT(proxy->subproxies); - memsize += SAFE_HASH_OVERHEAD(proxy->subproxies) + sizeof(proxy_instance_t) * objects; - mutex_unlock(&proxy->proxy_lock); - } - generated = gdata->subproxies_generated; - mutex_unlock(&gdata->lock); - - JSON_CPACK(subval, "{si,sI,sI}", "count", total_objects, "memory", memsize, "generated", generated); - json_set_object(val, "subproxies", subval); 
- - mutex_lock(&gdata->notify_lock); - objects = HASH_COUNT(gdata->notify_instances); - memsize = SAFE_HASH_OVERHEAD(gdata->notify_instances) + sizeof(notify_instance_t) * objects; - generated = gdata->proxy_notify_id; - mutex_unlock(&gdata->notify_lock); - - JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "notifies", subval); - - mutex_lock(&gdata->share_lock); - objects = HASH_COUNT(gdata->shares); - memsize = SAFE_HASH_OVERHEAD(gdata->shares) + sizeof(share_msg_t) * objects; - generated = gdata->share_id; - mutex_unlock(&gdata->share_lock); - - JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "shares", subval); - - mutex_lock(&gdata->psend_lock); - DL_COUNT(gdata->psends, msg, objects); - generated = gdata->psends_generated; - mutex_unlock(&gdata->psend_lock); - - memsize = sizeof(stratum_msg_t) * objects; - JSON_CPACK(subval, "{si,sI,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "psends", subval); - - send_api_response(val, sockd); -} - -/* Entered with parent proxy locked */ -static json_t *__proxystats(proxy_instance_t *proxy, proxy_instance_t *parent, bool discrete) -{ - json_t *val = json_object(); - - /* Opportunity to update hashrate just before we report it without - * needing to check on idle proxies regularly */ - __decay_proxy(proxy, parent, 0); - - json_set_int(val, "id", proxy->id); - json_set_int(val, "userid", proxy->userid); - json_set_string(val, "baseurl", proxy->baseurl); - json_set_string(val, "url", proxy->url); - json_set_string(val, "auth", proxy->auth); - json_set_string(val, "pass", proxy->pass); - json_set_string(val, "enonce1", proxy->enonce1 ? 
proxy->enonce1 : ""); - json_set_int(val, "nonce1len", proxy->nonce1len); - json_set_int(val, "nonce2len", proxy->nonce2len); - json_set_double(val, "diff", proxy->diff); - if (parent_proxy(proxy)) { - json_set_double(val, "total_accepted", proxy->total_accepted); - json_set_double(val, "total_rejected", proxy->total_rejected); - json_set_int(val, "subproxies", proxy->subproxy_count); - json_set_double(val, "tdsps1", proxy->tdsps1); - json_set_double(val, "tdsps5", proxy->tdsps5); - json_set_double(val, "tdsps60", proxy->tdsps60); - json_set_double(val, "tdsps1440", proxy->tdsps1440); - } - if (discrete) { - json_set_double(val, "dsps1", proxy->dsps1); - json_set_double(val, "dsps5", proxy->dsps5); - json_set_double(val, "dsps60", proxy->dsps60); - json_set_double(val, "dsps1440", proxy->dsps1440); - json_set_double(val, "accepted", proxy->diff_accepted); - json_set_double(val, "rejected", proxy->diff_rejected); - } - json_set_string(val, "connect", proxy_status[parent->connect_status]); - json_set_string(val, "subscribe", proxy_status[parent->subscribe_status]); - json_set_string(val, "authorise", proxy_status[parent->auth_status]); - json_set_int(val, "backoff", parent->backoff); - json_set_int(val, "lastshare", proxy->last_share.tv_sec); - json_set_bool(val, "global", proxy->global); - json_set_bool(val, "disabled", proxy->disabled); - json_set_bool(val, "alive", proxy->alive); - json_set_int(val, "maxclients", proxy->clients_per_proxy); - - return val; -} - -static json_t *proxystats(proxy_instance_t *proxy, bool discrete) -{ - proxy_instance_t *parent = proxy->parent; - json_t *val; - - mutex_lock(&parent->proxy_lock); - val = __proxystats(proxy, parent, discrete); - mutex_unlock(&parent->proxy_lock); - - return val; -} - -static json_t *all_proxystats(gdata_t *gdata) -{ - json_t *res, *arr_val = json_array(); - proxy_instance_t *proxy, *tmp; - - mutex_lock(&gdata->lock); - HASH_ITER(hh, gdata->proxies, proxy, tmp) { - mutex_unlock(&gdata->lock); - 
json_array_append_new(arr_val, proxystats(proxy, false)); - mutex_lock(&gdata->lock); - } - mutex_unlock(&gdata->lock); - - JSON_CPACK(res, "{so}", "proxy", arr_val); - return res; -} - -static void parse_proxystats(gdata_t *gdata, const int sockd, const char *buf) -{ - json_t *val = NULL, *res = NULL; - proxy_instance_t *proxy; - json_error_t err_val; - bool totals = false; - int id, subid = 0; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = all_proxystats(gdata); - goto out_noval; - } - if (!json_get_int(&id, val, "id")) { - res = all_proxystats(gdata); - goto out; - } - if (!json_get_int(&subid, val, "subid")) - totals = true; - proxy = proxy_by_id(gdata, id); - if (!proxy) { - res = json_errormsg("Proxy id %d not found", id); - goto out; - } - if (!totals) - proxy = subproxy_by_id(proxy, subid); - if (!proxy) { - res = json_errormsg("Proxy id %d:%d not found", id, subid); - goto out; - } - res = proxystats(proxy, true); -out: - json_decref(val); -out_noval: - send_api_response(res, sockd); -} - -static void send_subproxystats(gdata_t *gdata, const int sockd) -{ - json_t *res, *arr_val = json_array(); - proxy_instance_t *parent, *tmp; - - mutex_lock(&gdata->lock); - HASH_ITER(hh, gdata->proxies, parent, tmp) { - json_t *val, *subarr_val = json_array(); - proxy_instance_t *subproxy, *subtmp; - - mutex_unlock(&gdata->lock); - - mutex_lock(&parent->proxy_lock); - HASH_ITER(sh, parent->subproxies, subproxy, subtmp) { - val = __proxystats(subproxy, parent, true); - json_set_int(val, "subid", subproxy->subid); - json_array_append_new(subarr_val, val); - } - mutex_unlock(&parent->proxy_lock); - - JSON_CPACK(val, "{si,so}", - "id", parent->id, - "subproxy", subarr_val); - json_array_append_new(arr_val, val); - mutex_lock(&gdata->lock); - } - mutex_unlock(&gdata->lock); - - JSON_CPACK(res, "{so}", "proxy", arr_val); - send_api_response(res, sockd); -} - -static void parse_globaluser(ckpool_t *ckp, gdata_t *gdata, const char *buf) -{ - char *url, 
*username, *pass = strdupa(buf); - int userid = -1, proxyid = -1; - proxy_instance_t *proxy, *tmp; - int64_t clientid = -1; - bool found = false; - - sscanf(buf, "%d:%d:%"PRId64":%s", &proxyid, &userid, &clientid, pass); - if (unlikely(clientid < 0 || userid < 0 || proxyid < 0)) { - LOGWARNING("Failed to parse_globaluser ids from command %s", buf); - return; - } - username = strsep(&pass, ","); - if (unlikely(!username)) { - LOGWARNING("Failed to parse_globaluser username from command %s", buf); - return; - } - - LOGDEBUG("Checking userproxy proxy %d user %d:%"PRId64" worker %s pass %s", - proxyid, userid, clientid, username, pass); - - if (unlikely(proxyid >= ckp->proxies)) { - LOGWARNING("Trying to find non-existent proxy id %d in parse_globaluser", proxyid); - return; - } - - mutex_lock(&gdata->lock); - url = ckp->proxyurl[proxyid]; - HASH_ITER(hh, gdata->proxies, proxy, tmp) { - if (!strcmp(proxy->auth, username)) { - found = true; - break; - } - } - mutex_unlock(&gdata->lock); - - if (found) - return; - add_userproxy(ckp, gdata, userid, url, username, pass); -} - -static void proxy_loop(proc_instance_t *pi) -{ - proxy_instance_t *proxi = NULL, *cproxy; - server_instance_t *si = NULL, *old_si; - ckpool_t *ckp = pi->ckp; - gdata_t *gdata = ckp->gdata; - unix_msg_t *umsg = NULL; - connsock_t *cs = NULL; - char *buf = NULL; - -reconnect: - clear_unix_msg(&umsg); - - if (ckp->node) { - old_si = si; - si = live_server(ckp, gdata); - if (!si) - goto out; - cs = &si->cs; - if (!old_si) - LOGWARNING("Connected to bitcoind: %s:%s", cs->url, cs->port); - else if (si != old_si) - LOGWARNING("Failed over to bitcoind: %s:%s", cs->url, cs->port); - } - - /* This does not necessarily mean we reconnect, but a change has - * occurred and we need to reexamine the proxies. 
*/ - cproxy = wait_best_proxy(ckp, gdata); - if (!cproxy) - goto out; - if (proxi != cproxy) { - gdata->current_proxy = proxi = cproxy; - LOGWARNING("Successfully connected to pool %d %s as proxy%s", - proxi->id, proxi->url, ckp->passthrough ? " in passthrough mode" : ""); - } - - if (unlikely(!ckp->generator_ready)) { - ckp->generator_ready = true; - LOGWARNING("%s generator ready", ckp->name); - } -retry: - clear_unix_msg(&umsg); - do { - umsg = get_unix_msg(pi); - } while (!umsg); - - buf = umsg->buf; - LOGDEBUG("Proxy received request: %s", buf); - if (cmdmatch(buf, "stats")) { - send_stats(gdata, umsg->sockd); - } else if (cmdmatch(buf, "list")) { - send_list(gdata, umsg->sockd); - } else if (cmdmatch(buf, "sublist")) { - send_sublist(gdata, umsg->sockd, buf + 8); - } else if (cmdmatch(buf, "addproxy")) { - parse_addproxy(ckp, gdata, umsg->sockd, buf + 9); - } else if (cmdmatch(buf, "delproxy")) { - parse_delproxy(ckp, gdata, umsg->sockd, buf + 9); - } else if (cmdmatch(buf, "enableproxy")) { - parse_ableproxy(gdata, umsg->sockd, buf + 12, false); - } else if (cmdmatch(buf, "disableproxy")) { - parse_ableproxy(gdata, umsg->sockd, buf + 13, true); - } else if (cmdmatch(buf, "proxystats")) { - parse_proxystats(gdata, umsg->sockd, buf + 11); - } else if (cmdmatch(buf, "subproxystats")) { - send_subproxystats(gdata, umsg->sockd); - } else if (cmdmatch(buf, "globaluser")) { - parse_globaluser(ckp, gdata, buf + 11); - } else if (cmdmatch(buf, "reconnect")) { - goto reconnect; - } else if (cmdmatch(buf, "submitblock:")) { - char blockmsg[80]; - bool ret; - - LOGNOTICE("Submitting likely block solve share from upstream pool"); - ret = submit_block(cs, buf + 12 + 64 + 1); - memset(buf + 12 + 64, 0, 1); - sprintf(blockmsg, "%sblock:%s", ret ? 
"" : "no", buf + 12); - send_proc(ckp->stratifier, blockmsg); - } else if (cmdmatch(buf, "submittxn:")) { - if (unlikely(strlen(buf) < 11)) { - LOGWARNING("Got zero length submittxn"); - goto retry; - } - submit_txn(cs, buf + 10); - } else if (cmdmatch(buf, "loglevel")) { - sscanf(buf, "loglevel=%d", &ckp->loglevel); - } else if (cmdmatch(buf, "ping")) { - LOGDEBUG("Proxy received ping request"); - send_unix_msg(umsg->sockd, "pong"); - } else if (cmdmatch(buf, "recruit")) { - recruit_subproxy(gdata, buf); - } else if (cmdmatch(buf, "dropproxy")) { - drop_proxy(gdata, buf); - } else { - LOGWARNING("Generator received unrecognised message: %s", buf); - } - goto retry; -out: - return; -} - -/* Check which servers are alive, maintaining a connection with them and - * reconnect if a higher priority one is available. */ -static void *server_watchdog(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - gdata_t *gdata = ckp->gdata; - - rename_proc("swatchdog"); - - pthread_detach(pthread_self()); - - while (42) { - server_instance_t *best = NULL; - ts_t timer_t; - int i; - - cksleep_prepare_r(&timer_t); - for (i = 0; i < ckp->btcds; i++) { - server_instance_t *si = ckp->servers[i]; - - /* Have we reached the current server? 
*/ - if (server_alive(ckp, si, true) && !best) - best = si; - } - if (best && best != gdata->current_si) - send_proc(ckp->generator, "reconnect"); - cksleep_ms_r(&timer_t, 5000); - } - return NULL; -} - -static void setup_servers(ckpool_t *ckp) -{ - pthread_t pth_watchdog; - int i; - - ckp->servers = ckalloc(sizeof(server_instance_t *) * ckp->btcds); - for (i = 0; i < ckp->btcds; i++) { - server_instance_t *si; - connsock_t *cs; - - ckp->servers[i] = ckzalloc(sizeof(server_instance_t)); - si = ckp->servers[i]; - si->url = ckp->btcdurl[i]; - si->auth = ckp->btcdauth[i]; - si->pass = ckp->btcdpass[i]; - si->notify = ckp->btcdnotify[i]; - si->id = i; - cs = &si->cs; - cs->ckp = ckp; - cksem_init(&cs->sem); - cksem_post(&cs->sem); - } - - create_pthread(&pth_watchdog, server_watchdog, ckp); -} - -static void server_mode(ckpool_t *ckp, proc_instance_t *pi) -{ - int i; - - setup_servers(ckp); - - gen_loop(pi); - - for (i = 0; i < ckp->btcds; i++) { - server_instance_t *si = ckp->servers[i]; - - kill_server(si); - dealloc(si); - } - dealloc(ckp->servers); -} - -static proxy_instance_t *__add_proxy(ckpool_t *ckp, gdata_t *gdata, const int id) -{ - proxy_instance_t *proxy; - - gdata->proxies_generated++; - proxy = ckzalloc(sizeof(proxy_instance_t)); - proxy->id = id; - proxy->url = strdup(ckp->proxyurl[id]); - proxy->baseurl = strdup(proxy->url); - proxy->auth = strdup(ckp->proxyauth[id]); - if (ckp->proxypass[id]) - proxy->pass = strdup(ckp->proxypass[id]); - else - proxy->pass = strdup(""); - proxy->ckp = proxy->cs.ckp = ckp; - HASH_ADD_INT(gdata->proxies, id, proxy); - proxy->global = true; - cksem_init(&proxy->cs.sem); - cksem_post(&proxy->cs.sem); - return proxy; -} - -static void proxy_mode(ckpool_t *ckp, proc_instance_t *pi) -{ - gdata_t *gdata = ckp->gdata; - proxy_instance_t *proxy; - int i; - - mutex_init(&gdata->lock); - mutex_init(&gdata->notify_lock); - mutex_init(&gdata->share_lock); - - if (ckp->node) - setup_servers(ckp); - - /* Create all our proxy 
structures and pointers */ - for (i = 0; i < ckp->proxies; i++) { - proxy = __add_proxy(ckp, gdata, i); - if (ckp->passthrough) { - create_pthread(&proxy->pth_precv, passthrough_recv, proxy); - proxy->passsends = create_ckmsgq(ckp, "passsend", &passthrough_send); - } else { - mutex_init(&gdata->psend_lock); - cond_init(&gdata->psend_cond); - prepare_proxy(proxy); - create_pthread(&gdata->pth_uprecv, userproxy_recv, ckp); - create_pthread(&gdata->pth_psend, proxy_send, ckp); - } - } - - proxy_loop(pi); -} - -void *generator(void *arg) -{ - proc_instance_t *pi = (proc_instance_t *)arg; - ckpool_t *ckp = pi->ckp; - gdata_t *gdata; - - rename_proc(pi->processname); - LOGWARNING("%s generator starting", ckp->name); - gdata = ckzalloc(sizeof(gdata_t)); - ckp->gdata = gdata; - gdata->ckp = ckp; - - if (ckp->proxy) { - /* Wait for the stratifier to be ready for us */ - while (!ckp->stratifier_ready) - cksleep_ms(10); - proxy_mode(ckp, pi); - } else - server_mode(ckp, pi); - /* We should never get here unless there's a fatal error */ - LOGEMERG("Generator failure, shutting down"); - exit(1); - return NULL; -} diff --git a/solo-ckpool-source/src/generator.h b/solo-ckpool-source/src/generator.h deleted file mode 100644 index 4331956..0000000 --- a/solo-ckpool-source/src/generator.h +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. 
- */ - -#ifndef GENERATOR_H -#define GENERATOR_H - -#include "config.h" - -#define GETBEST_FAILED -1 -#define GETBEST_NOTIFY 0 -#define GETBEST_SUCCESS 1 - -void generator_add_send(ckpool_t *ckp, json_t *val); -struct genwork *generator_getbase(ckpool_t *ckp); -int generator_getbest(ckpool_t *ckp, char *hash); -bool generator_checkaddr(ckpool_t *ckp, const char *addr, bool *script, bool *segwit); -bool generator_checktxn(const ckpool_t *ckp, const char *txn, json_t **val); -char *generator_get_txn(ckpool_t *ckp, const char *hash); -bool generator_submitblock(ckpool_t *ckp, const char *buf); -void generator_preciousblock(ckpool_t *ckp, const char *hash); -bool generator_get_blockhash(ckpool_t *ckp, int height, char *hash); -void *generator(void *arg); - -#endif /* GENERATOR_H */ diff --git a/solo-ckpool-source/src/jansson-2.14/CHANGES b/solo-ckpool-source/src/jansson-2.14/CHANGES deleted file mode 100644 index cb6ff07..0000000 --- a/solo-ckpool-source/src/jansson-2.14/CHANGES +++ /dev/null @@ -1,986 +0,0 @@ -Version 2.14 -============ - -Released 2021-09-09 - -* New Features: - - - Add `json_object_getn`, `json_object_setn`, `json_object_deln`, and the - corresponding `nocheck` functions. (#520, by Maxim Zhukov) - -* Fixes: - - - Handle `sprintf` corner cases (#537, by Tobias Stoeckmann) - -* Build: - - - Symbol versioning for all exported symbols (#540, by Simon McVittie) - - Fix compiler warnings (#555, by Kelvin Lee) - -* Documentation: - - - Small fixes (#544, #546, by @i-ky) - - Sphinx 3 compatibility (#543, by Pierce Lopez) - - -Version 2.13.1 -============== - -Released 2020-05-07 - -* Build: - - - Include `jansson_version_str()` and `jansson_version_cmp()` in - shared library. (#534) - - - Include ``scripts/`` in tarball. (#535) - - -Version 2.13 -============ - -Released 2020-05-05 - -* New Features: - - - Add `jansson_version_str()` and `jansson_version_cmp()` for runtime - version checking (#465). 
- - - Add `json_object_update_new()`, `json_object_update_existing_new()` - and `json_object_update_missing_new()` functions (#499). - - - Add `json_object_update_recursive()` (#505). - -* Build: - - - Add ``-Wno-format-truncation`` to suppress format truncation warnings (#489). - -* Bug fixes: - - - Remove ``strtod`` macro definition for MinGW (#498). - - - Add infinite loop check in `json_deep_copy()` (#490). - - - Add ``pipe`` macro definition for MinGW (#500). - - - Enhance ``JANSSON_ATTRS`` macro to support earlier C standard(C89) (#501). - - - Update version detection for sphinx-build (#502). - -* Documentation: - - - Fix typos (#483, #494). - - - Document that call the custom free function to free the return value - of `json_dumps()` if you have a custom malloc/free (#490). - - - Add vcpkg installation instructions (#496). - - - Document that non-blocking file descriptor is not supported on - `json_loadfd()` (#503). - - -Version 2.12 -============ - -Released 2018-11-26 - -* Bug fixes: - - - Fix error message in `json_pack()` for NULL object (#409). - - - Avoid invalid memory read in `json_pack()` (#421). - - - Call va_end after va_copy in `json_vsprintf()` (#427). - - - Improve handling of formats with '?' and '*' in `json_pack()` (#438). - - - Remove inappropriate `jsonp_free()` which caused segmentation fault in - error handling (#444). - -* Build: - - - Add function attributes for GCC and CLANG to provide warnings on improper - use of jansson routines (#404). - - - Many CMake fixes (#408, #412, #415). - - - Enable -Bsymbolic-functions linker flag whenever possible. - - - Resolve various compiler warnings (#423, #430, #435, #436). - - - Fix code coverage ignored paths (#439). - -* Other: - - - Test coverage improvements (#398, #400). - - - Add VS 2017 to appveyor, update Visual Studio documentation (#417). - - - Update copyright for 2018 (#424). - - - Update install instructions in README (#401). 
- -Version 2.11 -============ - -Released 2018-02-09 - -* New features: - - - Add `json_pack()` format specifiers s*, o* and O* for values that - can be omitted if null (#339). - - - Add `json_error_code()` to retrieve numeric error codes (#365, #380, - #381). - - - Enable thread safety for `json_dump()` on all systems. Enable thread - safe `json_decref()` and `json_incref()` for modern compilers (#389). - - - Add `json_sprintf()` and `json_vsprintf()` (#393). - -* Bug Fixes: - - - Fix incorrect report of success from `json_dump_file()` when an error - is returned by `fclose()` (#359). - - - Make json_equal() const-correct (#344). - - - Fix incomplete stealing of references by `json_pack()` (#374). - -* Build: - - - Work around gcc's -Wimplicit-fallthrough. - - - Fix CMake detection of ``sys/types.h`` header (#375). - - - Fix `jansson.pc` generated by CMake to be more consistent with the one - generated using GNU Autotools (#368). - -* Other: - - - Miscellaneous documentation fixes (#356, #378, #395). - - - Remove unnecessary reference actions from parsers (#377). - -Version 2.10 -============ - -Released 2017-03-02 - -* New features: - - - Add JSON_EMBED encoding flag allowing arrays and objects to be encoded - into existing streams (#329). - - - Add `json_dumpb()` function for dumping to a pre-allocated buffer (#328). - - - Add `json_dumpfd()` and `json_loadfd()` functions for dumping to streaming - file descriptors (#328). - - - Add support for parsing buffers larger than 2GB (#309). - -* Build: - - - Fix CMake build when LONG_LONG_INT is defined as "" (#321) - -* Other: - - - Internal code cleanup (#311, #314) - -Version 2.9 -=========== - -Released 2016-09-18 - -* New features: - - - Add ``json_auto_t`` to automatically decref a value that goes out - of scope. Available only on GCC and Clang. 
(#301) - -* Build: - - - Fix CMake build (at least on Linux) by removing conflicting - jansson_config.h from the distribution (#306) - - - Change CMake install target generation to be optional (#305) - -* Documentation: - - - Small documentation fixes. - - -Version 2.8 -=========== - -Released 2016-08-30 - -* New features: - - - Always preserve insertion order of object items. - `json_object_iter()` and friends, `json_object_foreach()` and - `json_dumps()` and friends now always work in the insertion order of - object items (#293). - - - Add `json_object_foreach_safe()` macro that allows - `json_object_del()` calls during iteration (#230). - - - Add `json_get_alloc_funcs()` to allow reading the allocation - functions set by `json_set_alloc_funcs()` (#262, #264). - - - Add `json_pack()` format specifiers s?, o? and O? for values that - can be null (#261, #270). - -* Bug fixes: - - - Fix a crash when parsing inputs consisting of very deeply nested - arrays or objects (#282, #284). - - - Never convert numbers to integers in the parser when - JSON_DECODE_INT_AS_REAL is set. This fixes error messages for - overflowing numbers when JSON_DECODE_INT_AS_REAL is set (#212). - - - Fix a use-after-free in `json_pack()` error handling. - - - Fix subnormal number parsing on mingw32. - - - Handle out-of-memory situations gracefully in the hashtable - implementation (#298). - -* Build: - - - Fix build with CMake on all versions of Visual Studio up to 2015 - (#262, #289). - - - Fix pkgconfig libdir when using CMake (#268). - - - Fix CMake config for static CRT builds on Windows (#206). - - - Fix warnings on LLVM 6.0 targeting iOS arm64 (#208). - - - Add coverlls.io support via Travis for a nice test coverage badge - (#211). - - - Don't expect ``jansson_config.h`` to be in the compiler's include - path (#209). - - - Add a build-time option to set initial hashtable size (#213). 
- - - Use snprintf and strncpy in place of sprintf and strcpy to silence - linker warnings on OpenBSD (#233). - -* Documentation: - - - Fix various typos in documentation, and a broken link (#258). - - - Add an example program in ``examples/`` (#214, #217). - - - Fix building of documentation man pages (#207). - - - Document the fact that copying objects doesn't preserve the - insertion order of keys (#237). - -* Tests: - - - Don't use the nonstandard __FUNCTION__ macro in tests. - - - Use expr instead of $((...)) in shell scripts for Solaris 10 - compatibility. - - - Disable Visual Studio warning C4756 when triggered deliberately in - tests (#216). - - - Other minor fixes (#221, #248). - -* Other changes: - - - List all unrecognized object keys when strict unpacking fails - (#263). - - - Alter the order of the members of the hashtable_pair struct for - easier debugging. - - - Minor performance improvement to `json_dump()` and friends (#234). - - - Minor style fixes (#255, #257). - - -Version 2.7 -=========== - -Released 2014-10-02 - -* New features: - - - `json_pack()` and friends: Add format specifiers ``s%`` and ``+%`` - for a size_t string length (#141). - - - `json_unpack()` and friends: Add format specifier ``s%`` for - unpacking the string length along with the string itself (#141). - - - Add length-aware string constructors `json_stringn()` and - `json_stringn_nocheck()`, length-aware string mutators - `json_string_setn()` and `json_string_setn_nocheck()`, and a - function for getting string's length `json_string_length()` (#141, - #143). - - - Support ``\u0000`` escapes in the decoder. The support can be - enabled by using the ``JSON_ALLOW_NUL`` decoding flag (#141). - - - Add `json_boolean_value()` as an alias for `json_is_true()` - (#146). - - - Add JSON_REAL_PRECISION encoding flag/macro for controlling real - number precision (#178). - - - Define the maximum indentation as JSON_MAX_INDENT (#191). 
- -* Bug fixes: - - - Some malformed ``\uNNNN`` escapes could crash the decoder with an - assertion failure. - - - Avoid integer overflows with very long strings in UTF-8 decoder and - hashtable. - - - Check for *NULL* key in `json_object_get()` and - `json_object_del()` (#151). - - - Enhance hashtable seeding on Windows (#162). - - - `json_unpack()`: Allow mixing JSON_STRICT with optional keys - (#162, #163). - - - Fix int/int32 mismatch (#142). - - - Parse subnormal numbers correctly (#202). - -* Build: - - - Remove VS2010 build files. CMake should be used on Windows instead - (#165). - - - Fix CMake build flags for MinGW (#193). - - - Add CMake config files for find_package. Rename config.h to - jansson_private_config.h (#157, #159). - - - Make Valgrind checks work with CMake (#160). - - - Fix feature checks to use correct __ATOMIC flags. - - - Fix CMake checks for uint16_t and uint8_t support (#177). - - - Make Jansson build on SmartOS/Solaris (#171). - - - Work around a GCC bug on Solaris (#175). - - - Fix autoreconf on Debian (#182). - - - Don't use GNU make specific export for global AM_CFLAGS (#203, - #204). - - - Fix building on Android using the supplied Android.mk (#166, - #174). - - - Android.mk: Add -DHAVE_STDINT_H to LOCAL_CFLAGS (#200). - -* Documentation: - - - Document JANSSON_BUILD_SHARED_LIBS CMake option (#187). - -* Tests: - - - Close file handles correctly (#198). - -* Other changes: - - - ``\uNNNN`` escapes are now encoded in upper case for better - readability. - - - Enable usage of AddressSanitizer (#180). - - -Version 2.6 -=========== - -Released 2014-02-11 - -* Security: - - - CVE-2013-6401: The hash function used by the hashtable - implementation has been changed, and is automatically seeded with - random data when the first JSON object is created. This prevents - an attacker from causing large JSON objects with specially crafted - keys perform poorly. 
- -* New features: - - - `json_object_seed()`: Set the seed value of the hash function. - -* Bug fixes: - - - Include CMake specific files in the release tarball. - -* Documentation: - - - Fix tutorial source to send a User-Agent header, which is now - required by the GitHub API. - - - Set all memory to zero in secure_free() example. - - -Version 2.5 -=========== - -Released 2013-09-19 - -* New features: - - - `json_pack()` and friends: Add format specifiers ``s#``, ``+`` and - ``+#``. - - - Add ``JSON_DECODE_INT_AS_REAL`` decoding flag to treat all numbers - as real in the decoder (#123). - - - Add `json_array_foreach()`, paralleling `json_object_foreach()` - (#118). - -* Bug fixes: - - - `json_dumps()` and friends: Don't crash if json is *NULL* and - ``JSON_ENCODE_ANY`` is set. - - - Fix a theoretical integer overflow in `jsonp_strdup()`. - - - Fix `l_isxdigit()` macro (#97). - - - Fix an off-by-one error in `json_array_remove()`. - -* Build: - - - Support CMake in addition to GNU Autotools (#106, #107, #112, - #115, #120, #127). - - - Support building for Android (#109). - - - Don't use ``-Werror`` by default. - - - Support building and testing with VPATH (#93). - - - Fix compilation when ``NDEBUG`` is defined (#128) - -* Tests: - - - Fix a refleak in ``test/bin/json_process.c``. - -* Documentation: - - - Clarify the return value of `json_load_callback_t`. - - - Document how to circumvent problems with separate heaps on Windows. - - - Fix memory leaks and warnings in ``github_commits.c``. - - - Use `json_decref()` properly in tutorial. - -* Other: - - - Make it possible to forward declare ``struct json_t``. - - -Version 2.4 -=========== - -Released 2012-09-23 - -* New features: - - - Add `json_boolean()` macro that returns the JSON true or false - value based on its argument (#86). - - - Add `json_load_callback()` that calls a callback function - repeatedly to read the JSON input (#57). 
- - - Add JSON_ESCAPE_SLASH encoding flag to escape all occurences of - ``/`` with ``\/``. - -* Bug fixes: - - - Check for and reject NaN and Inf values for reals. Encoding these - values resulted in invalid JSON. - - - Fix `json_real_set()` to return -1 on error. - -* Build: - - - Jansson now builds on Windows with Visual Studio 2010, and - includes solution and project files in ``win32/vs2010/`` - directory. - - - Fix build warnings (#77, #78). - - - Add ``-no-undefined`` to LDFLAGS (#90). - -* Tests: - - - Fix the symbol exports test on Linux/PPC64 (#88). - -* Documentation: - - - Fix typos (#73, #84). - - -Version 2.3.1 -============= - -Released 2012-04-20 - -* Build issues: - - - Only use ``long long`` if ``strtoll()`` is also available. - -* Documentation: - - - Fix the names of library version constants in documentation. (#52) - - - Change the tutorial to use GitHub API v3. (#65) - -* Tests: - - - Make some tests locale independent. (#51) - - - Distribute the library exports test in the tarball. - - - Make test run on shells that don't support the ``export FOO=bar`` - syntax. - - -Version 2.3 -=========== - -Released 2012-01-27 - -* New features: - - - `json_unpack()` and friends: Add support for optional object keys - with the ``{s?o}`` syntax. - - - Add `json_object_update_existing()` and - `json_object_update_missing()`, for updating only existing keys or - only adding missing keys to an object. (#37) - - - Add `json_object_foreach()` for more convenient iteration over - objects. (#45, #46) - - - When decoding JSON, write the number of bytes that were read from - input to ``error.position`` also on success. This is handy with - ``JSON_DISABLE_EOF_CHECK``. - - - Add support for decoding any JSON value, not just arrays or - objects. The support is enabled with the new ``JSON_DECODE_ANY`` - flag. Patch by Andrea Marchesini. (#4) - -* Bug fixes - - - Avoid problems with object's serial number growing too big. 
(#40, - #41) - - - Decoding functions now return NULL if the first argument is NULL. - Patch by Andrea Marchesini. - - - Include ``jansson_config.h.win32`` in the distribution tarball. - - - Remove ``+`` and leading zeros from exponents in the encoder. - (#39) - - - Make Jansson build and work on MinGW. (#39, #38) - -* Documentation - - - Note that the same JSON values must not be encoded in parallel by - separate threads. (#42) - - - Document MinGW support. - - -Version 2.2.1 -============= - -Released 2011-10-06 - -* Bug fixes: - - - Fix real number encoding and decoding under non-C locales. (#32) - - - Fix identifier decoding under non-UTF-8 locales. (#35) - - - `json_load_file()`: Open the input file in binary mode for maximum - compatiblity. - -* Documentation: - - - Clarify the lifecycle of the result of the ``s`` fromat of - `json_unpack()`. (#31) - - - Add some portability info. (#36) - - - Little clarifications here and there. - -* Other: - - - Some style fixes, issues detected by static analyzers. - - -Version 2.2 -=========== - -Released 2011-09-03 - -* New features: - - - `json_dump_callback()`: Pass the encoder output to a callback - function in chunks. - -* Bug fixes: - - - `json_string_set()`: Check that target is a string and value is - not NULL. - -* Other: - - - Documentation typo fixes and clarifications. - - -Version 2.1 -=========== - -Released 2011-06-10 - -* New features: - - - `json_loadb()`: Decode a string with a given size, useful if the - string is not null terminated. - - - Add ``JSON_ENCODE_ANY`` encoding flag to allow encoding any JSON - value. By default, only arrays and objects can be encoded. (#19) - - - Add ``JSON_REJECT_DUPLICATES`` decoding flag to issue a decoding - error if any JSON object in the input contins duplicate keys. (#3) - - - Add ``JSON_DISABLE_EOF_CHECK`` decoding flag to stop decoding after a - valid JSON input. This allows other data after the JSON data. 
- -* Bug fixes: - - - Fix an additional memory leak when memory allocation fails in - `json_object_set()` and friends. - - - Clear errno before calling `strtod()` for better portability. (#27) - -* Building: - - - Avoid set-but-not-used warning/error in a test. (#20) - -* Other: - - - Minor clarifications to documentation. - - -Version 2.0.1 -============= - -Released 2011-03-31 - -* Bug fixes: - - - Replace a few `malloc()` and `free()` calls with their - counterparts that support custom memory management. - - - Fix object key hashing in json_unpack() strict checking mode. - - - Fix the parentheses in ``JANSSON_VERSION_HEX`` macro. - - - Fix `json_object_size()` return value. - - - Fix a few compilation issues. - -* Portability: - - - Enhance portability of `va_copy()`. - - - Test framework portability enhancements. - -* Documentation: - - - Distribute ``doc/upgrading.rst`` with the source tarball. - - - Build documentation in strict mode in ``make distcheck``. - - -Version 2.0 -=========== - -Released 2011-02-28 - -This release is backwards incompatible with the 1.x release series. -See the chapter "Upgrading from older versions" in documentation for -details. - -* Backwards incompatible changes: - - - Unify unsigned integer usage in the API: All occurences of - unsigned int and unsigned long have been replaced with size_t. - - - Change JSON integer's underlying type to the widest signed integer - type available, i.e. long long if it's supported, otherwise long. - Add a typedef json_int_t that defines the type. - - - Change the maximum indentation depth to 31 spaces in encoder. This - frees up bits from the flags parameter of encoding functions - `json_dumpf()`, `json_dumps()` and `json_dump_file()`. - - - For future needs, add a flags parameter to all decoding functions - `json_loadf()`, `json_loads()` and `json_load_file()`. - -* New features - - - `json_pack()`, `json_pack_ex()`, `json_vpack_ex()`: Create JSON - values based on a format string. 
- - - `json_unpack()`, `json_unpack_ex()`, `json_vunpack_ex()`: Simple - value extraction and validation functionality based on a format - string. - - - Add column, position and source fields to the ``json_error_t`` - struct. - - - Enhance error reporting in the decoder. - - - ``JANSSON_VERSION`` et al.: Preprocessor constants that define the - library version. - - - `json_set_alloc_funcs()`: Set custom memory allocation functions. - -* Fix many portability issues, especially on Windows. - -* Configuration - - - Add file ``jansson_config.h`` that contains site specific - configuration. It's created automatically by the configure script, - or can be created by hand if the configure script cannot be used. - The file ``jansson_config.h.win32`` can be used without - modifications on Windows systems. - - - Add a section to documentation describing how to build Jansson on - Windows. - - - Documentation now requires Sphinx 1.0 or newer. - - -Version 1.3 -=========== - -Released 2010-06-13 - -* New functions: - - - `json_object_iter_set()`, `json_object_iter_set_new()`: Change - object contents while iterating over it. - - - `json_object_iter_at()`: Return an iterator that points to a - specific object item. - -* New encoding flags: - - - ``JSON_PRESERVE_ORDER``: Preserve the insertion order of object - keys. 
- -* Bug fixes: - - - Fix an error that occured when an array or object was first - encoded as empty, then populated with some data, and then - re-encoded - - - Fix the situation like above, but when the first encoding resulted - in an error - -* Documentation: - - - Clarify the documentation on reference stealing, providing an - example usage pattern - - -Version 1.2.1 -============= - -Released 2010-04-03 - -* Bug fixes: - - - Fix reference counting on ``true``, ``false`` and ``null`` - - Estimate real number underflows in decoder with 0.0 instead of - issuing an error - -* Portability: - - - Make ``int32_t`` available on all systems - - Support compilers that don't have the ``inline`` keyword - - Require Autoconf 2.60 (for ``int32_t``) - -* Tests: - - - Print test names correctly when ``VERBOSE=1`` - - ``test/suites/api``: Fail when a test fails - - Enhance tests for iterators - - Enhance tests for decoding texts that contain null bytes - -* Documentation: - - - Don't remove ``changes.rst`` in ``make clean`` - - Add a chapter on RFC conformance - - -Version 1.2 -=========== - -Released 2010-01-21 - -* New functions: - - - `json_equal()`: Test whether two JSON values are equal - - `json_copy()` and `json_deep_copy()`: Make shallow and deep copies - of JSON values - - Add a version of all functions taking a string argument that - doesn't check for valid UTF-8: `json_string_nocheck()`, - `json_string_set_nocheck()`, `json_object_set_nocheck()`, - `json_object_set_new_nocheck()` - -* New encoding flags: - - - ``JSON_SORT_KEYS``: Sort objects by key - - ``JSON_ENSURE_ASCII``: Escape all non-ASCII Unicode characters - - ``JSON_COMPACT``: Use a compact representation with all unneeded - whitespace stripped - -* Bug fixes: - - - Revise and unify whitespace usage in encoder: Add spaces between - array and object items, never append newline to output. 
- - Remove const qualifier from the ``json_t`` parameter in - `json_string_set()`, `json_integer_set()` and `json_real_set`. - - Use ``int32_t`` internally for representing Unicode code points - (int is not enough on all platforms) - -* Other changes: - - - Convert ``CHANGES`` (this file) to reStructured text and add it to - HTML documentation - - The test system has been refactored. Python is no longer required - to run the tests. - - Documentation can now be built by invoking ``make html`` - - Support for pkg-config - - -Version 1.1.3 -============= - -Released 2009-12-18 - -* Encode reals correctly, so that first encoding and then decoding a - real always produces the same value -* Don't export private symbols in ``libjansson.so`` - - -Version 1.1.2 -============= - -Released 2009-11-08 - -* Fix a bug where an error message was not produced if the input file - could not be opened in `json_load_file()` -* Fix an assertion failure in decoder caused by a minus sign without a - digit after it -* Remove an unneeded include of ``stdint.h`` in ``jansson.h`` - - -Version 1.1.1 -============= - -Released 2009-10-26 - -* All documentation files were not distributed with v1.1; build - documentation in make distcheck to prevent this in the future -* Fix v1.1 release date in ``CHANGES`` - - -Version 1.1 -=========== - -Released 2009-10-20 - -* API additions and improvements: - - - Extend array and object APIs - - Add functions to modify integer, real and string values - - Improve argument validation - - Use unsigned int instead of ``uint32_t`` for encoding flags - -* Enhance documentation - - - Add getting started guide and tutorial - - Fix some typos - - General clarifications and cleanup - -* Check for integer and real overflows and underflows in decoder -* Make singleton values thread-safe (``true``, ``false`` and ``null``) -* Enhance circular reference handling -* Don't define ``-std=c99`` in ``AM_CFLAGS`` -* Add C++ guards to ``jansson.h`` -* Minor performance and 
portability improvements -* Expand test coverage - - -Version 1.0.4 -============= - -Released 2009-10-11 - -* Relax Autoconf version requirement to 2.59 -* Make Jansson compile on platforms where plain ``char`` is unsigned -* Fix API tests for object - - -Version 1.0.3 -============= - -Released 2009-09-14 - -* Check for integer and real overflows and underflows in decoder -* Use the Python json module for tests, or simplejson if the json - module is not found -* Distribute changelog (this file) - - -Version 1.0.2 -============= - -Released 2009-09-08 - -* Handle EOF correctly in decoder - - -Version 1.0.1 -============= - -Released 2009-09-04 - -* Fixed broken `json_is_boolean()` - - -Version 1.0 -=========== - -Released 2009-08-25 - -* Initial release diff --git a/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt b/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt deleted file mode 100644 index 39b9ad3..0000000 --- a/solo-ckpool-source/src/jansson-2.14/CMakeLists.txt +++ /dev/null @@ -1,662 +0,0 @@ -cmake_minimum_required (VERSION 3.1) -project(jansson C) - -# Options -option(JANSSON_BUILD_SHARED_LIBS "Build shared libraries." OFF) -option(USE_URANDOM "Use /dev/urandom to seed the hash function." ON) -option(USE_WINDOWS_CRYPTOAPI "Use CryptGenRandom to seed the hash function." ON) - -if (MSVC) - # This option must match the settings used in your program, in particular if you - # are linking statically - option(JANSSON_STATIC_CRT "Link the static CRT libraries" OFF ) -endif () - -option(JANSSON_EXAMPLES "Compile example applications" ON) - -if (UNIX) - option(JANSSON_COVERAGE "(GCC Only! Requires gcov/lcov to be installed). Include target for doing coverage analysis for the test suite. Note that -DCMAKE_BUILD_TYPE=Debug must be set" OFF) -endif () - -# Set some nicer output dirs. 
-set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin) -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib) -set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib) -set(JANSSON_TEMP_DIR ${CMAKE_CURRENT_BINARY_DIR}/tmp) - -# Give the debug version a different postfix for windows, -# so both the debug and release version can be built in the -# same build-tree on Windows (MSVC). -if (WIN32 AND NOT CMAKE_DEBUG_POSTFIX) - set(CMAKE_DEBUG_POSTFIX "_d") -endif() - -# This is how I thought it should go -# set (JANSSON_VERSION "2.3.1") -# set (JANSSON_SOVERSION 2) - -set(JANSSON_DISPLAY_VERSION "2.14") - -# This is what is required to match the same numbers as automake's -set(JANSSON_VERSION "4.14.0") -set(JANSSON_SOVERSION 4) - -# for CheckFunctionKeywords -set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake") - -include (CheckCSourceCompiles) -include (CheckFunctionExists) -include (CheckFunctionKeywords) -include (CheckIncludeFiles) -include (CheckTypeSize) - -# suppress format-truncation warning -include (CheckCCompilerFlag) -check_c_compiler_flag(-Wno-format-truncation HAS_NO_FORMAT_TRUNCATION) -if (HAS_NO_FORMAT_TRUNCATION) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-format-truncation") -endif() - -if (MSVC) - # Turn off Microsofts "security" warnings. 
- add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4005 /wd4996 /nologo" ) - - if (JANSSON_STATIC_CRT) - set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /MT") - set(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /MTd") - endif() -endif() - -message("C compiler: ${CMAKE_C_COMPILER_ID}") - -if (JANSSON_COVERAGE) - include(CodeCoverage) - set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage") -endif() - -check_include_files (endian.h HAVE_ENDIAN_H) -check_include_files (fcntl.h HAVE_FCNTL_H) -check_include_files (sched.h HAVE_SCHED_H) -check_include_files (unistd.h HAVE_UNISTD_H) -check_include_files (sys/param.h HAVE_SYS_PARAM_H) -check_include_files (sys/stat.h HAVE_SYS_STAT_H) -check_include_files (sys/time.h HAVE_SYS_TIME_H) -check_include_files (sys/types.h HAVE_SYS_TYPES_H) - -check_function_exists (close HAVE_CLOSE) -check_function_exists (getpid HAVE_GETPID) -check_function_exists (gettimeofday HAVE_GETTIMEOFDAY) -check_function_exists (open HAVE_OPEN) -check_function_exists (read HAVE_READ) -check_function_exists (sched_yield HAVE_SCHED_YIELD) - -# Check for the int-type includes -check_include_files (stdint.h HAVE_STDINT_H) - -# Check our 64 bit integer sizes -check_type_size (__int64 __INT64) -check_type_size (int64_t INT64_T) -check_type_size ("long long" LONG_LONG_INT) - -# Check our 32 bit integer sizes -check_type_size (int32_t INT32_T) -check_type_size (__int32 __INT32) -check_type_size ("long" LONG_INT) -check_type_size ("int" INT) -if (HAVE_INT32_T) - set (JSON_INT32 int32_t) -elseif (HAVE___INT32) - set (JSON_INT32 __int32) -elseif (HAVE_LONG_INT AND (LONG_INT EQUAL 4)) - set (JSON_INT32 long) -elseif (HAVE_INT AND (INT EQUAL 4)) - set (JSON_INT32 int) -else () - message (FATAL_ERROR "Could not detect a valid 32-bit integer type") -endif () - -check_type_size ("unsigned long" UNSIGNED_LONG_INT) -check_type_size ("unsigned int" UNSIGNED_INT) -check_type_size ("unsigned short" UNSIGNED_SHORT) - -check_type_size (uint32_t 
UINT32_T) -check_type_size (__uint32 __UINT32) -if (HAVE_UINT32_T) - set (JSON_UINT32 uint32_t) -elseif (HAVE___UINT32) - set (JSON_UINT32 __uint32) -elseif (HAVE_UNSIGNED_LONG_INT AND (UNSIGNED_LONG_INT EQUAL 4)) - set (JSON_UINT32 "unsigned long") -elseif (HAVE_UNSIGNED_INT AND (UNSIGNED_INT EQUAL 4)) - set (JSON_UINT32 "unsigned int") -else () - message (FATAL_ERROR "Could not detect a valid unsigned 32-bit integer type") -endif () - -check_type_size (uint16_t UINT16_T) -check_type_size (__uint16 __UINT16) -if (HAVE_UINT16_T) - set (JSON_UINT16 uint16_t) -elseif (HAVE___UINT16) - set (JSON_UINT16 __uint16) -elseif (HAVE_UNSIGNED_INT AND (UNSIGNED_INT EQUAL 2)) - set (JSON_UINT16 "unsigned int") -elseif (HAVE_UNSIGNED_SHORT AND (UNSIGNED_SHORT EQUAL 2)) - set (JSON_UINT16 "unsigned short") -else () - message (FATAL_ERROR "Could not detect a valid unsigned 16-bit integer type") -endif () - -check_type_size (uint8_t UINT8_T) -check_type_size (__uint8 __UINT8) -if (HAVE_UINT8_T) - set (JSON_UINT8 uint8_t) -elseif (HAVE___UINT8) - set (JSON_UINT8 __uint8) -else () - set (JSON_UINT8 "unsigned char") -endif () - -# Check for ssize_t and SSIZE_T existence. 
-check_type_size(ssize_t SSIZE_T) -check_type_size(SSIZE_T UPPERCASE_SSIZE_T) -if(NOT HAVE_SSIZE_T) - if(HAVE_UPPERCASE_SSIZE_T) - set(JSON_SSIZE SSIZE_T) - else() - set(JSON_SSIZE int) - endif() -endif() -set(CMAKE_EXTRA_INCLUDE_FILES "") - -# Check for all the variants of strtoll -check_function_exists (strtoll HAVE_STRTOLL) -check_function_exists (strtoq HAVE_STRTOQ) -check_function_exists (_strtoi64 HAVE__STRTOI64) - -# Figure out what variant we should use -if (HAVE_STRTOLL) - set (JSON_STRTOINT strtoll) -elseif (HAVE_STRTOQ) - set (JSON_STRTOINT strtoq) -elseif (HAVE__STRTOI64) - set (JSON_STRTOINT _strtoi64) -else () - # fallback to strtol (32 bit) - # this will set all the required variables - set (JSON_STRTOINT strtol) - set (JSON_INT_T long) - set (JSON_INTEGER_FORMAT "\"ld\"") -endif () - -# if we haven't defined JSON_INT_T, then we have a 64 bit conversion function. -# detect what to use for the 64 bit type. -# Note: I will prefer long long if I can get it, as that is what the automake system aimed for. -if (NOT DEFINED JSON_INT_T) - if (HAVE_LONG_LONG_INT AND (LONG_LONG_INT EQUAL 8)) - set (JSON_INT_T "long long") - elseif (HAVE_INT64_T) - set (JSON_INT_T int64_t) - elseif (HAVE___INT64) - set (JSON_INT_T __int64) - else () - message (FATAL_ERROR "Could not detect 64 bit type, although I detected the strtoll equivalent") - endif () - - # Apparently, Borland BCC and MSVC wants I64d, - # Borland BCC could also accept LD - # and gcc wants ldd, - # I am not sure what cygwin will want, so I will assume I64d - - if (WIN32) # matches both msvc and cygwin - set (JSON_INTEGER_FORMAT "\"I64d\"") - else () - set (JSON_INTEGER_FORMAT "\"lld\"") - endif () -endif () - - -# If locale.h and localeconv() are available, define to 1, otherwise to 0. 
-check_include_files (locale.h HAVE_LOCALE_H) -check_function_exists (localeconv HAVE_LOCALECONV) - -if (HAVE_LOCALECONV AND HAVE_LOCALE_H) - set (JSON_HAVE_LOCALECONV 1) -else () - set (JSON_HAVE_LOCALECONV 0) -endif() - -# check if we have setlocale -check_function_exists(setlocale HAVE_SETLOCALE) - -# Check what the inline keyword is. -# Note that the original JSON_INLINE was always set to just 'inline', so this goes further. -check_function_keywords("inline") -check_function_keywords("__inline") -check_function_keywords("__inline__") - -if (HAVE_INLINE) - set(JSON_INLINE inline) -elseif (HAVE___INLINE) - set(JSON_INLINE __inline) -elseif (HAVE___INLINE__) - set(JSON_INLINE __inline__) -else() - # no inline on this platform - set (JSON_INLINE) -endif() - -check_c_source_compiles ("int main() { unsigned long val; __sync_bool_compare_and_swap(&val, 0, 1); __sync_add_and_fetch(&val, 1); __sync_sub_and_fetch(&val, 1); return 0; } " HAVE_SYNC_BUILTINS) -check_c_source_compiles ("int main() { char l; unsigned long v; __atomic_test_and_set(&l, __ATOMIC_RELAXED); __atomic_store_n(&v, 1, __ATOMIC_RELEASE); __atomic_load_n(&v, __ATOMIC_ACQUIRE); __atomic_add_fetch(&v, 1, __ATOMIC_ACQUIRE); __atomic_sub_fetch(&v, 1, __ATOMIC_RELEASE); return 0; }" HAVE_ATOMIC_BUILTINS) - -if (HAVE_SYNC_BUILTINS) - set(JSON_HAVE_SYNC_BUILTINS 1) -else() - set(JSON_HAVE_SYNC_BUILTINS 0) -endif() - -if (HAVE_ATOMIC_BUILTINS) - set(JSON_HAVE_ATOMIC_BUILTINS 1) -else() - set(JSON_HAVE_ATOMIC_BUILTINS 0) -endif() - -set (JANSSON_INITIAL_HASHTABLE_ORDER 3 CACHE STRING "Number of buckets new object hashtables contain is 2 raised to this power. 
The default is 3, so empty hashtables contain 2^3 = 8 buckets.") - -# configure the public config file -configure_file (${CMAKE_CURRENT_SOURCE_DIR}/cmake/jansson_config.h.cmake - ${CMAKE_CURRENT_BINARY_DIR}/include/jansson_config.h) - -# Copy the jansson.h file to the public include folder -file (COPY ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson.h - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/include/) - -add_definitions(-DJANSSON_USING_CMAKE) - -# configure the private config file -configure_file (${CMAKE_CURRENT_SOURCE_DIR}/cmake/jansson_private_config.h.cmake - ${CMAKE_CURRENT_BINARY_DIR}/private_include/jansson_private_config.h) - -# and tell the source code to include it -add_definitions(-DHAVE_CONFIG_H) - -include_directories (${CMAKE_CURRENT_BINARY_DIR}/include) -include_directories (${CMAKE_CURRENT_BINARY_DIR}/private_include) - -# Add the lib sources. -file(GLOB JANSSON_SRC src/*.c) - -set(JANSSON_HDR_PRIVATE - ${CMAKE_CURRENT_SOURCE_DIR}/src/hashtable.h - ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson_private.h - ${CMAKE_CURRENT_SOURCE_DIR}/src/strbuffer.h - ${CMAKE_CURRENT_SOURCE_DIR}/src/utf.h - ${CMAKE_CURRENT_BINARY_DIR}/private_include/jansson_private_config.h) - -set(JANSSON_HDR_PUBLIC - ${CMAKE_CURRENT_BINARY_DIR}/include/jansson_config.h - ${CMAKE_CURRENT_SOURCE_DIR}/src/jansson.h) - -source_group("Library Sources" FILES ${JANSSON_SRC}) -source_group("Library Private Headers" FILES ${JANSSON_HDR_PRIVATE}) -source_group("Library Public Headers" FILES ${JANSSON_HDR_PUBLIC}) - -if(JANSSON_BUILD_SHARED_LIBS) - add_library(jansson SHARED - ${JANSSON_SRC} - ${JANSSON_HDR_PRIVATE} - ${JANSSON_HDR_PUBLIC} - src/jansson.def) - -# check if linker support --default-symver - list(APPEND CMAKE_REQUIRED_LIBRARIES "-Wl,--default-symver") - check_c_source_compiles( - " - int main (void) - { - return 0; - } - " - DSYMVER_WORKS - ) - list(REMOVE_ITEM CMAKE_REQUIRED_LIBRARIES "-Wl,--default-symver") - - if (SYMVER_WORKS) - set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} 
-Wl,--default-symver") - else() -# some linkers may only support --version-script - file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/jansson.sym" "JANSSON_${JANSSON_SOVERSION} { - global: - *; -}; -") - list(APPEND CMAKE_REQUIRED_LIBRARIES "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") - check_c_source_compiles( - " - int main (void) - { - return 0; - } - " - VSCRIPT_WORKS - ) - list(REMOVE_ITEM CMAKE_REQUIRED_LIBRARIES "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") - if (VSCRIPT_WORKS) - set(CMAKE_SHARED_LINKER_FLAGS "-Wl,--version-script,${CMAKE_CURRENT_BINARY_DIR}/jansson.sym") - endif() - endif() - - set_target_properties(jansson PROPERTIES - VERSION ${JANSSON_VERSION} - SOVERSION ${JANSSON_SOVERSION}) -else() - add_library(jansson STATIC - ${JANSSON_SRC} - ${JANSSON_HDR_PRIVATE} - ${JANSSON_HDR_PUBLIC}) - set_target_properties(jansson PROPERTIES - POSITION_INDEPENDENT_CODE true) -endif() - -if (JANSSON_EXAMPLES) - add_executable(simple_parse "${CMAKE_CURRENT_SOURCE_DIR}/examples/simple_parse.c") - target_link_libraries(simple_parse jansson) -endif() - -# For building Documentation (uses Sphinx) -option(JANSSON_BUILD_DOCS "Build documentation (uses python-sphinx)." ON) -if (JANSSON_BUILD_DOCS) - find_package(Sphinx) - - if (NOT SPHINX_FOUND) - message(WARNING "Sphinx not found. Cannot generate documentation! - Set -DJANSSON_BUILD_DOCS=OFF to get rid of this message.") - else() - if (Sphinx_VERSION_STRING VERSION_LESS 1.0) - message(WARNING "Your Sphinx version is too old! - This project requires Sphinx v1.0 or above to produce - proper documentation (you have v${Sphinx_VERSION_STRING}). 
- You will get output but it will have errors.") - endif() - - # configured documentation tools and intermediate build results - set(BINARY_BUILD_DIR "${CMAKE_CURRENT_BINARY_DIR}/_build") - - # Sphinx cache with pickled ReST documents - set(SPHINX_CACHE_DIR "${CMAKE_CURRENT_BINARY_DIR}/_doctrees") - - # CMake could be used to build the conf.py file too, - # eg it could automatically write the version of the program or change the theme. - # if(NOT DEFINED SPHINX_THEME) - # set(SPHINX_THEME default) - # endif() - # - # if(NOT DEFINED SPHINX_THEME_DIR) - # set(SPHINX_THEME_DIR) - # endif() - # - # configure_file( - # "${CMAKE_CURRENT_SOURCE_DIR}/conf.py.in" - # "${BINARY_BUILD_DIR}/conf.py" - # @ONLY) - - # TODO: Add support for all sphinx builders: http://sphinx-doc.org/builders.html - - # Add documentation targets. - set(DOC_TARGETS html) - - option(JANSSON_BUILD_MAN "Create a target for building man pages." ON) - - if (JANSSON_BUILD_MAN) - if (Sphinx_VERSION_STRING VERSION_LESS 1.0) - message(WARNING "Sphinx version 1.0 > is required to build man pages. You have v${Sphinx_VERSION_STRING}.") - else() - list(APPEND DOC_TARGETS man) - endif() - endif() - - option(JANSSON_BUILD_LATEX "Create a target for building latex docs (to create PDF)." OFF) - - if (JANSSON_BUILD_LATEX) - find_package(LATEX) - - if (NOT LATEX_COMPILER) - message("Couldn't find Latex, can't build latex docs using Sphinx") - else() - message("Latex found! If you have problems building, see Sphinx documentation for required Latex packages.") - list(APPEND DOC_TARGETS latex) - endif() - endif() - - # The doc target will build all documentation targets. 
- add_custom_target(doc) - - foreach (DOC_TARGET ${DOC_TARGETS}) - add_custom_target(${DOC_TARGET} - ${SPHINX_EXECUTABLE} - # -q # Enable for quiet mode - -b ${DOC_TARGET} - -d "${SPHINX_CACHE_DIR}" - # -c "${BINARY_BUILD_DIR}" # enable if using cmake-generated conf.py - "${CMAKE_CURRENT_SOURCE_DIR}/doc" - "${CMAKE_CURRENT_BINARY_DIR}/doc/${DOC_TARGET}" - COMMENT "Building ${DOC_TARGET} documentation with Sphinx") - - add_dependencies(doc ${DOC_TARGET}) - endforeach() - - message("Building documentation enabled for: ${DOC_TARGETS}") - endif() -endif () - - -option(JANSSON_WITHOUT_TESTS "Don't build tests ('make test' to execute tests)" OFF) - -if (NOT JANSSON_WITHOUT_TESTS) - option(JANSSON_TEST_WITH_VALGRIND "Enable valgrind tests." OFF) - - ENABLE_TESTING() - - if (JANSSON_TEST_WITH_VALGRIND) - # TODO: Add FindValgrind.cmake instead of having a hardcoded path. - - add_definitions(-DVALGRIND) - - # enable valgrind - set(CMAKE_MEMORYCHECK_COMMAND valgrind) - set(CMAKE_MEMORYCHECK_COMMAND_OPTIONS - "--error-exitcode=1 --leak-check=full --show-reachable=yes --track-origins=yes -q") - - set(MEMCHECK_COMMAND - "${CMAKE_MEMORYCHECK_COMMAND} ${CMAKE_MEMORYCHECK_COMMAND_OPTIONS}") - separate_arguments(MEMCHECK_COMMAND) - endif () - - # - # Test suites. - # - if (CMAKE_COMPILER_IS_GNUCC) - add_definitions(-Wall -Wextra -Wdeclaration-after-statement) - endif () - - set(api_tests - test_array - test_chaos - test_copy - test_dump - test_dump_callback - test_equal - test_fixed_size - test_load - test_load_callback - test_loadb - test_number - test_object - test_pack - test_simple - test_sprintf - test_unpack) - - # Doing arithmetic on void pointers is not allowed by Microsofts compiler - # such as secure_malloc and secure_free is doing, so exclude it for now. - if (NOT MSVC) - list(APPEND api_tests test_memory_funcs) - endif() - - # Helper macro for building and linking a test program. 
- macro(build_testprog name dir) - add_executable(${name} ${dir}/${name}.c) - add_dependencies(${name} jansson) - target_link_libraries(${name} jansson) - endmacro(build_testprog) - - # Create executables and tests/valgrind tests for API tests. - foreach (test ${api_tests}) - build_testprog(${test} ${CMAKE_CURRENT_SOURCE_DIR}/test/suites/api) - - if (JANSSON_TEST_WITH_VALGRIND) - add_test(memcheck__${test} - ${MEMCHECK_COMMAND} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${test} - WORKING_DIRECTORY ${JANSSON_TEMP_DIR}) - else() - add_test(${test} - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${test} - WORKING_DIRECTORY ${JANSSON_TEMP_DIR}) - endif () - endforeach () - - # Test harness for the suites tests. - build_testprog(json_process ${CMAKE_CURRENT_SOURCE_DIR}/test/bin) - - set(SUITE_TEST_CMD ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/json_process) - set(SUITES encoding-flags valid invalid invalid-unicode) - foreach (SUITE ${SUITES}) - file(GLOB TESTDIRS test/suites/${SUITE}/*) - - foreach (TESTDIR ${TESTDIRS}) - if (IS_DIRECTORY ${TESTDIR}) - get_filename_component(TNAME ${TESTDIR} NAME) - - if (JANSSON_TEST_WITH_VALGRIND) - add_test(memcheck__${SUITE}__${TNAME} - ${MEMCHECK_COMMAND} ${SUITE_TEST_CMD} ${TESTDIR}) - else() - add_test(${SUITE}__${TNAME} - ${SUITE_TEST_CMD} ${TESTDIR}) - endif() - - if ((${SUITE} STREQUAL "valid" OR ${SUITE} STREQUAL "invalid") AND NOT EXISTS ${TESTDIR}/nostrip) - if (JANSSON_TEST_WITH_VALGRIND) - add_test(memcheck__${SUITE}__${TNAME}__strip - ${MEMCHECK_COMMAND} ${SUITE_TEST_CMD} --strip ${TESTDIR}) - else() - add_test(${SUITE}__${TNAME}__strip - ${SUITE_TEST_CMD} --strip ${TESTDIR}) - endif() - endif () - endif () - endforeach () - endforeach () - - if (JANSSON_COVERAGE) - SETUP_TARGET_FOR_COVERAGE(coverage coverage ctest) - endif () - - # Enable using "make check" just like the autotools project. 
- # By default cmake creates a target "make test" - add_custom_target(check COMMAND ${CMAKE_CTEST_COMMAND} - DEPENDS json_process ${api_tests}) -endif () - -# -# Installation preparation. -# - -# Allow the user to override installation directories. -set(JANSSON_INSTALL_LIB_DIR lib CACHE PATH "Installation directory for libraries") -set(JANSSON_INSTALL_BIN_DIR bin CACHE PATH "Installation directory for executables") -set(JANSSON_INSTALL_INCLUDE_DIR include CACHE PATH "Installation directory for header files") - -if(WIN32 AND NOT CYGWIN) - set(DEF_INSTALL_CMAKE_DIR cmake) -else() - set(DEF_INSTALL_CMAKE_DIR lib/cmake/jansson) -endif() - -set(JANSSON_INSTALL_CMAKE_DIR ${DEF_INSTALL_CMAKE_DIR} CACHE PATH "Installation directory for CMake files") - -# Create pkg-conf file. -# (We use the same files as ./configure does, so we -# have to defined the same variables used there). -set(prefix ${CMAKE_INSTALL_PREFIX}) -set(exec_prefix "\${prefix}") -set(libdir "\${exec_prefix}/${JANSSON_INSTALL_LIB_DIR}") -set(includedir "\${prefix}/${JANSSON_INSTALL_INCLUDE_DIR}") -set(VERSION ${JANSSON_DISPLAY_VERSION}) -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/jansson.pc.in - ${CMAKE_CURRENT_BINARY_DIR}/jansson.pc @ONLY) - -# Make sure the paths are relative. -foreach(p LIB BIN INCLUDE CMAKE) - set(var JANSSON_INSTALL_${p}_DIR) -endforeach() - -# Generate the config file for the build-tree. -set(JANSSON__INCLUDE_DIRS - "${CMAKE_CURRENT_SOURCE_DIR}/include" - "${CMAKE_CURRENT_BINARY_DIR}/include") -set(JANSSON_INCLUDE_DIRS ${JANSSON__INCLUDE_DIRS} CACHE PATH "Jansson include directories") -configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cmake/janssonConfig.cmake.in - ${CMAKE_CURRENT_BINARY_DIR}/janssonConfig.cmake - @ONLY) - - -# Generate the config file for the installation tree. 
-include(CMakePackageConfigHelpers) - -write_basic_package_version_file( - "${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfigVersion.cmake" - VERSION ${JANSSON_VERSION} - COMPATIBILITY ExactVersion -) - -configure_package_config_file( - "cmake/janssonConfig.cmake.in" - "${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfig.cmake" - INSTALL_DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}" -) - -# -# Install targets. -# -option(JANSSON_INSTALL "Generate installation target" ON) -if (JANSSON_INSTALL) - install(TARGETS jansson - EXPORT janssonTargets - LIBRARY DESTINATION "lib" - ARCHIVE DESTINATION "lib" - RUNTIME DESTINATION "bin" - INCLUDES DESTINATION "include") - - install(FILES ${JANSSON_HDR_PUBLIC} - DESTINATION "include") - - # Install the pkg-config. - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/jansson.pc - DESTINATION lib/pkgconfig) - - # Install the configs. - install(FILES - ${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfig.cmake - ${CMAKE_CURRENT_BINARY_DIR}/cmake/janssonConfigVersion.cmake - DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}") - - # Install exports for the install-tree. - install(EXPORT janssonTargets - NAMESPACE jansson:: - DESTINATION "${JANSSON_INSTALL_CMAKE_DIR}") -endif() - -# For use when simply using add_library from a parent project to build jansson. 
-set(JANSSON_LIBRARIES jansson CACHE STRING "jansson libraries") diff --git a/solo-ckpool-source/src/jansson-2.14/LICENSE b/solo-ckpool-source/src/jansson-2.14/LICENSE deleted file mode 100644 index 483459c..0000000 --- a/solo-ckpool-source/src/jansson-2.14/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2009-2020 Petri Lehtinen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/solo-ckpool-source/src/jansson-2.14/Makefile.am b/solo-ckpool-source/src/jansson-2.14/Makefile.am deleted file mode 100644 index bbeefbd..0000000 --- a/solo-ckpool-source/src/jansson-2.14/Makefile.am +++ /dev/null @@ -1,13 +0,0 @@ -ACLOCAL_AMFLAGS = -I m4 -EXTRA_DIST = CHANGES LICENSE README.rst CMakeLists.txt cmake android examples scripts -SUBDIRS = doc src - -# "make distcheck" builds the dvi target, so use it to check that the -# documentation is built correctly. 
-dvi: - $(MAKE) SPHINXOPTS_EXTRA=-W html - -pkgconfigdir = $(libdir)/pkgconfig -pkgconfig_DATA = jansson.pc - -TESTS = scripts/clang-format-check diff --git a/solo-ckpool-source/src/jansson-2.14/README.rst b/solo-ckpool-source/src/jansson-2.14/README.rst deleted file mode 100644 index 83fc89a..0000000 --- a/solo-ckpool-source/src/jansson-2.14/README.rst +++ /dev/null @@ -1,81 +0,0 @@ -Jansson README -============== - -.. image:: https://github.com/akheron/jansson/workflows/tests/badge.svg - :target: https://github.com/akheron/jansson/actions - -.. image:: https://ci.appveyor.com/api/projects/status/lmhkkc4q8cwc65ko - :target: https://ci.appveyor.com/project/akheron/jansson - -.. image:: https://coveralls.io/repos/akheron/jansson/badge.png?branch=master - :target: https://coveralls.io/r/akheron/jansson?branch=master - -Jansson_ is a C library for encoding, decoding and manipulating JSON -data. Its main features and design principles are: - -- Simple and intuitive API and data model - -- `Comprehensive documentation`_ - -- No dependencies on other libraries - -- Full Unicode support (UTF-8) - -- Extensive test suite - -Jansson is licensed under the `MIT license`_; see LICENSE in the -source distribution for details. - - -Compilation and Installation ----------------------------- - -You can download and install Jansson using the `vcpkg `_ dependency manager: - -.. code-block:: bash - - git clone https://github.com/Microsoft/vcpkg.git - cd vcpkg - ./bootstrap-vcpkg.sh - ./vcpkg integrate install - vcpkg install jansson - -The Jansson port in vcpkg is kept up to date by Microsoft team members and community contributors. If the version is out of date, please `create an issue or pull request `_ on the vcpkg repository. 
- -If you obtained a `source tarball`_ from the "Releases" section of the main -site just use the standard autotools commands:: - - $ ./configure - $ make - $ make install - -To run the test suite, invoke:: - - $ make check - -If the source has been checked out from a Git repository, the -./configure script has to be generated first. The easiest way is to -use autoreconf:: - - $ autoreconf -i - - -Documentation -------------- - -Documentation is available at http://jansson.readthedocs.io/en/latest/. - -The documentation source is in the ``doc/`` subdirectory. To generate -HTML documentation, invoke:: - - $ make html - -Then, point your browser to ``doc/_build/html/index.html``. Sphinx_ -1.0 or newer is required to generate the documentation. - - -.. _Jansson: http://www.digip.org/jansson/ -.. _`Comprehensive documentation`: http://jansson.readthedocs.io/en/latest/ -.. _`MIT license`: http://www.opensource.org/licenses/mit-license.php -.. _`source tarball`: http://www.digip.org/jansson#releases -.. _Sphinx: http://sphinx.pocoo.org/ diff --git a/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h b/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h deleted file mode 100644 index 618a0da..0000000 --- a/solo-ckpool-source/src/jansson-2.14/android/jansson_config.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright (c) 2010-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - * - * - * This file specifies a part of the site-specific configuration for - * Jansson, namely those things that affect the public API in - * jansson.h. - * - * The configure script copies this file to jansson_config.h and - * replaces @var@ substitutions by values that fit your system. If you - * cannot run the configure script, you can do the value substitution - * by hand. 
- */ - -#ifndef JANSSON_CONFIG_H -#define JANSSON_CONFIG_H - -/* If your compiler supports the inline keyword in C, JSON_INLINE is - defined to `inline', otherwise empty. In C++, the inline is always - supported. */ -#ifdef __cplusplus -#define JSON_INLINE inline -#else -#define JSON_INLINE inline -#endif - -/* If your compiler supports the `long long` type and the strtoll() - library function, JSON_INTEGER_IS_LONG_LONG is defined to 1, - otherwise to 0. */ -#define JSON_INTEGER_IS_LONG_LONG 1 - -/* If locale.h and localeconv() are available, define to 1, - otherwise to 0. */ -#define JSON_HAVE_LOCALECONV 0 - -/* Maximum recursion depth for parsing JSON input. - This limits the depth of e.g. array-within-array constructions. */ -#define JSON_PARSER_MAX_DEPTH 2048 - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake deleted file mode 100644 index 44601fd..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/CheckFunctionKeywords.cmake +++ /dev/null @@ -1,15 +0,0 @@ -include(CheckCSourceCompiles) - -macro(check_function_keywords _wordlist) - set(${_result} "") - foreach(flag ${_wordlist}) - string(REGEX REPLACE "[-+/ ()]" "_" flagname "${flag}") - string(TOUPPER "${flagname}" flagname) - set(have_flag "HAVE_${flagname}") - check_c_source_compiles("${flag} void func(); void func() { } int main() { func(); return 0; }" ${have_flag}) - if(${have_flag} AND NOT ${_result}) - set(${_result} "${flag}") -# break() - endif(${have_flag} AND NOT ${_result}) - endforeach(flag) -endmacro(check_function_keywords) diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake deleted file mode 100644 index 3a21d3d..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/CodeCoverage.cmake +++ /dev/null @@ -1,163 +0,0 @@ -# -# Boost Software License - Version 1.0 - August 17th, 2003 -# -# 
Permission is hereby granted, free of charge, to any person or organization -# obtaining a copy of the software and accompanying documentation covered by -# this license (the "Software") to use, reproduce, display, distribute, -# execute, and transmit the Software, and to prepare derivative works of the -# Software, and to permit third-parties to whom the Software is furnished to -# do so, all subject to the following: -# -# The copyright notices in the Software and this entire statement, including -# the above license grant, this restriction and the following disclaimer, -# must be included in all copies of the Software, in whole or in part, and -# all derivative works of the Software, unless such copies or derivative -# works are solely in the form of machine-executable object code generated by -# a source language processor. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -# SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -# FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -# DEALINGS IN THE SOFTWARE. -# -# 2012-01-31, Lars Bilke -# - Enable Code Coverage -# -# 2013-09-17, Joakim Söderberg -# - Added support for Clang. -# - Some additional usage instructions. -# -# USAGE: -# 1. Copy this file into your cmake modules path. -# -# 2. Add the following line to your CMakeLists.txt: -# INCLUDE(CodeCoverage) -# -# 3. Set compiler flags to turn off optimization and enable coverage: -# SET(CMAKE_CXX_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") -# SET(CMAKE_C_FLAGS "-g -O0 -fprofile-arcs -ftest-coverage") -# -# 3. 
Use the function SETUP_TARGET_FOR_COVERAGE to create a custom make target -# which runs your test executable and produces a lcov code coverage report: -# Example: -# SETUP_TARGET_FOR_COVERAGE( -# my_coverage_target # Name for custom target. -# test_driver # Name of the test driver executable that runs the tests. -# # NOTE! This should always have a ZERO as exit code -# # otherwise the coverage generation will not complete. -# coverage # Name of output directory. -# ) -# -# 4. Build a Debug build: -# cmake -DCMAKE_BUILD_TYPE=Debug .. -# make -# make my_coverage_target -# -# - -# Check prereqs -FIND_PROGRAM( GCOV_PATH gcov ) -FIND_PROGRAM( LCOV_PATH lcov ) -FIND_PROGRAM( GENHTML_PATH genhtml ) -FIND_PROGRAM( GCOVR_PATH gcovr PATHS ${CMAKE_SOURCE_DIR}/tests) - -IF(NOT GCOV_PATH) - MESSAGE(FATAL_ERROR "gcov not found! Aborting...") -ENDIF() # NOT GCOV_PATH - -IF(NOT (CMAKE_COMPILER_IS_GNUCXX OR CMAKE_COMPILER_IS_GNUCC)) - # Clang version 3.0.0 and greater now supports gcov as well. - MESSAGE(WARNING "Compiler is not GNU gcc! Clang Version 3.0.0 and greater supports gcov as well, but older versions don't.") - - IF(NOT ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang" OR "${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")) - MESSAGE(FATAL_ERROR "Compiler is not GNU gcc or Clang! Aborting...") - ENDIF() -ENDIF() # NOT CMAKE_COMPILER_IS_GNUCXX - -IF ( NOT CMAKE_BUILD_TYPE STREQUAL "Debug" ) - MESSAGE( WARNING "Code coverage results with an optimized (non-Debug) build may be misleading" ) -ENDIF() # NOT CMAKE_BUILD_TYPE STREQUAL "Debug" - - -# Param _targetname The name of new the custom make target -# Param _outputname lcov output is generated as _outputname.info -# HTML report is generated in _outputname/index.html -# Param _testrunner The name of the target which runs the tests. -# MUST return ZERO always, even on errors. -# If not, no coverage report will be created! 
-# Optional fourth parameter is passed as arguments to _testrunner -# Pass them in list form, e.g.: "-j;2" for -j 2 -FUNCTION(SETUP_TARGET_FOR_COVERAGE _targetname _outputname _testrunner) - - IF(NOT LCOV_PATH) - MESSAGE(FATAL_ERROR "lcov not found! Aborting...") - ENDIF() # NOT LCOV_PATH - - IF(NOT GENHTML_PATH) - MESSAGE(FATAL_ERROR "genhtml not found! Aborting...") - ENDIF() # NOT GENHTML_PATH - - # Setup target - ADD_CUSTOM_TARGET(${_targetname} - - # Cleanup lcov - ${LCOV_PATH} --directory . --zerocounters - - # Run tests - COMMAND ${_testrunner} ${ARGV3} - - # Capturing lcov counters and generating report - COMMAND ${LCOV_PATH} --directory . --capture --output-file ${_outputname}.info --rc lcov_branch_coverage=1 - COMMAND ${LCOV_PATH} --remove ${_outputname}.info '*/build/include/*' '*/test/*' '/usr/include/*' --output-file ${_outputname}.info --rc lcov_branch_coverage=1 - # COMMAND ${GENHTML_PATH} --branch-coverage -o ${_outputname} ${_outputname}.info.cleaned - # COMMAND ${CMAKE_COMMAND} -E remove ${_outputname}.info ${_outputname}.info.cleaned - - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - COMMENT "Resetting code coverage counters to zero.\nProcessing code coverage counters and generating report." - ) - - # Show info where to find the report - ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD - COMMAND ; - COMMENT "Open ./${_outputname}/index.html in your browser to view the coverage report." - ) - -ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE - -# Param _targetname The name of new the custom make target -# Param _testrunner The name of the target which runs the tests -# Param _outputname cobertura output is generated as _outputname.xml -# Optional fourth parameter is passed as arguments to _testrunner -# Pass them in list form, e.g.: "-j;2" for -j 2 -FUNCTION(SETUP_TARGET_FOR_COVERAGE_COBERTURA _targetname _testrunner _outputname) - - IF(NOT PYTHON_EXECUTABLE) - MESSAGE(FATAL_ERROR "Python not found! 
Aborting...") - ENDIF() # NOT PYTHON_EXECUTABLE - - IF(NOT GCOVR_PATH) - MESSAGE(FATAL_ERROR "gcovr not found! Aborting...") - ENDIF() # NOT GCOVR_PATH - - ADD_CUSTOM_TARGET(${_targetname} - - # Run tests - ${_testrunner} ${ARGV3} - - # Running gcovr - COMMAND ${GCOVR_PATH} -x -r ${CMAKE_SOURCE_DIR} -e '${CMAKE_SOURCE_DIR}/tests/' -o ${_outputname}.xml - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - COMMENT "Running gcovr to produce Cobertura code coverage report." - ) - - # Show info where to find the report - ADD_CUSTOM_COMMAND(TARGET ${_targetname} POST_BUILD - COMMAND ; - COMMENT "Cobertura code coverage report saved in ${_outputname}.xml." - ) - -ENDFUNCTION() # SETUP_TARGET_FOR_COVERAGE_COBERTURA - diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake deleted file mode 100644 index 3bf0a5d..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/FindSphinx.cmake +++ /dev/null @@ -1,315 +0,0 @@ -# -# PART B. DOWNLOADING AGREEMENT - LICENSE FROM SBIA WITH RIGHT TO SUBLICENSE ("SOFTWARE LICENSE"). -# ------------------------------------------------------------------------------------------------ -# -# 1. As used in this Software License, "you" means the individual downloading and/or -# using, reproducing, modifying, displaying and/or distributing the Software and -# the institution or entity which employs or is otherwise affiliated with such -# individual in connection therewith. 
The Section of Biomedical Image Analysis, -# Department of Radiology at the Universiy of Pennsylvania ("SBIA") hereby grants -# you, with right to sublicense, with respect to SBIA's rights in the software, -# and data, if any, which is the subject of this Software License (collectively, -# the "Software"), a royalty-free, non-exclusive license to use, reproduce, make -# derivative works of, display and distribute the Software, provided that: -# (a) you accept and adhere to all of the terms and conditions of this Software -# License; (b) in connection with any copy of or sublicense of all or any portion -# of the Software, all of the terms and conditions in this Software License shall -# appear in and shall apply to such copy and such sublicense, including without -# limitation all source and executable forms and on any user documentation, -# prefaced with the following words: "All or portions of this licensed product -# (such portions are the "Software") have been obtained under license from the -# Section of Biomedical Image Analysis, Department of Radiology at the University -# of Pennsylvania and are subject to the following terms and conditions:" -# (c) you preserve and maintain all applicable attributions, copyright notices -# and licenses included in or applicable to the Software; (d) modified versions -# of the Software must be clearly identified and marked as such, and must not -# be misrepresented as being the original Software; and (e) you consider making, -# but are under no obligation to make, the source code of any of your modifications -# to the Software freely available to others on an open source basis. -# -# 2. 
The license granted in this Software License includes without limitation the -# right to (i) incorporate the Software into proprietary programs (subject to -# any restrictions applicable to such programs), (ii) add your own copyright -# statement to your modifications of the Software, and (iii) provide additional -# or different license terms and conditions in your sublicenses of modifications -# of the Software; provided that in each case your use, reproduction or -# distribution of such modifications otherwise complies with the conditions -# stated in this Software License. -# -# 3. This Software License does not grant any rights with respect to third party -# software, except those rights that SBIA has been authorized by a third -# party to grant to you, and accordingly you are solely responsible for -# (i) obtaining any permissions from third parties that you need to use, -# reproduce, make derivative works of, display and distribute the Software, -# and (ii) informing your sublicensees, including without limitation your -# end-users, of their obligations to secure any such required permissions. -# -# 4. The Software has been designed for research purposes only and has not been -# reviewed or approved by the Food and Drug Administration or by any other -# agency. YOU ACKNOWLEDGE AND AGREE THAT CLINICAL APPLICATIONS ARE NEITHER -# RECOMMENDED NOR ADVISED. Any commercialization of the Software is at the -# sole risk of the party or parties engaged in such commercialization. -# You further agree to use, reproduce, make derivative works of, display -# and distribute the Software in compliance with all applicable governmental -# laws, regulations and orders, including without limitation those relating -# to export and import control. -# -# 5. The Software is provided "AS IS" and neither SBIA nor any contributor to -# the software (each a "Contributor") shall have any obligation to provide -# maintenance, support, updates, enhancements or modifications thereto. 
-# SBIA AND ALL CONTRIBUTORS SPECIFICALLY DISCLAIM ALL EXPRESS AND IMPLIED -# WARRANTIES OF ANY KIND INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. -# IN NO EVENT SHALL SBIA OR ANY CONTRIBUTOR BE LIABLE TO ANY PARTY FOR -# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY ARISING IN ANY WAY RELATED -# TO THE SOFTWARE, EVEN IF SBIA OR ANY CONTRIBUTOR HAS BEEN ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGES. TO THE MAXIMUM EXTENT NOT PROHIBITED BY LAW OR -# REGULATION, YOU FURTHER ASSUME ALL LIABILITY FOR YOUR USE, REPRODUCTION, -# MAKING OF DERIVATIVE WORKS, DISPLAY, LICENSE OR DISTRIBUTION OF THE SOFTWARE -# AND AGREE TO INDEMNIFY AND HOLD HARMLESS SBIA AND ALL CONTRIBUTORS FROM -# AND AGAINST ANY AND ALL CLAIMS, SUITS, ACTIONS, DEMANDS AND JUDGMENTS ARISING -# THEREFROM. -# -# 6. None of the names, logos or trademarks of SBIA or any of SBIA's affiliates -# or any of the Contributors, or any funding agency, may be used to endorse -# or promote products produced in whole or in part by operation of the Software -# or derived from or based on the Software without specific prior written -# permission from the applicable party. -# -# 7. Any use, reproduction or distribution of the Software which is not in accordance -# with this Software License shall automatically revoke all rights granted to you -# under this Software License and render Paragraphs 1 and 2 of this Software -# License null and void. -# -# 8. This Software License does not grant any rights in or to any intellectual -# property owned by SBIA or any Contributor except those rights expressly -# granted hereunder. -# -# -# PART C. MISCELLANEOUS -# --------------------- -# -# This Agreement shall be governed by and construed in accordance with the laws -# of The Commonwealth of Pennsylvania without regard to principles of conflicts -# of law. 
This Agreement shall supercede and replace any license terms that you -# may have agreed to previously with respect to Software from SBIA. -# -############################################################################## -# @file FindSphinx.cmake -# @brief Find Sphinx documentation build tools. -# -# @par Input variables: -# -# -# @tp @b Sphinx_DIR @endtp -# -# -# -# @tp @b SPHINX_DIR @endtp -# -# -# -# @tp @b Sphinx_FIND_COMPONENTS @endtp -# -# -#
Installation directory of Sphinx tools. Can also be set as environment variable.
Alternative environment variable for @c Sphinx_DIR.
Sphinx build tools to look for, i.e., 'apidoc' and/or 'build'.
-# -# @par Output variables: -# -# -# @tp @b Sphinx_FOUND @endtp -# -# -# -# @tp @b SPHINX_FOUND @endtp -# -# -# @tp @b SPHINX_EXECUTABLE @endtp -# -# -# -# @tp @b Sphinx_PYTHON_EXECUTABLE @endtp -# -# -# -# @tp @b Sphinx_PYTHON_OPTIONS @endtp -# -# -# -# @tp @b Sphinx-build_EXECUTABLE @endtp -# -# -# -# @tp @b Sphinx-apidoc_EXECUTABLE @endtp -# -# -# -# @tp @b Sphinx_VERSION_STRING @endtp -# -# -# -# @tp @b Sphinx_VERSION_MAJOR @endtp -# -# -# -# @tp @b Sphinx_VERSION_MINOR @endtp -# -# -# -# @tp @b Sphinx_VERSION_PATCH @endtp -# -# -#
Whether all or only the requested Sphinx build tools were found.
Alias for @c Sphinx_FOUND. -#
Non-cached alias for @c Sphinx-build_EXECUTABLE.
Python executable used to run sphinx-build. This is either the -# by default found Python interpreter or a specific version as -# specified by the shebang (#!) of the sphinx-build script.
A list of Python options extracted from the shebang (#!) of the -# sphinx-build script. The -E option is added by this module -# if the Python executable is not the system default to avoid -# problems with a differing setting of the @c PYTHONHOME.
Absolute path of the found sphinx-build tool.
Absolute path of the found sphinx-apidoc tool.
Sphinx version found e.g. 1.1.2.
Sphinx major version found e.g. 1.
Sphinx minor version found e.g. 1.
Sphinx patch version found e.g. 2.
-# -# @ingroup CMakeFindModules -############################################################################## - -set (_Sphinx_REQUIRED_VARS) - -# ---------------------------------------------------------------------------- -# initialize search -if (NOT Sphinx_DIR) - if (NOT $ENV{Sphinx_DIR} STREQUAL "") - set (Sphinx_DIR "$ENV{Sphinx_DIR}" CACHE PATH "Installation prefix of Sphinx (docutils)." FORCE) - else () - set (Sphinx_DIR "$ENV{SPHINX_DIR}" CACHE PATH "Installation prefix of Sphinx (docutils)." FORCE) - endif () -endif () - -# ---------------------------------------------------------------------------- -# default components to look for -if (NOT Sphinx_FIND_COMPONENTS) - set (Sphinx_FIND_COMPONENTS "build") -elseif (NOT Sphinx_FIND_COMPONENTS MATCHES "^(build|apidoc)$") - message (FATAL_ERROR "Invalid Sphinx component in: ${Sphinx_FIND_COMPONENTS}") -endif () - -# ---------------------------------------------------------------------------- -# find components, i.e., build tools -foreach (_Sphinx_TOOL IN LISTS Sphinx_FIND_COMPONENTS) - if (Sphinx_DIR) - find_program ( - Sphinx-${_Sphinx_TOOL}_EXECUTABLE - NAMES sphinx-${_Sphinx_TOOL} sphinx-${_Sphinx_TOOL}.py - HINTS "${Sphinx_DIR}" - PATH_SUFFIXES bin - DOC "The sphinx-${_Sphinx_TOOL} Python script." - NO_DEFAULT_PATH - ) - else () - find_program ( - Sphinx-${_Sphinx_TOOL}_EXECUTABLE - NAMES sphinx-${_Sphinx_TOOL} sphinx-${_Sphinx_TOOL}.py - DOC "The sphinx-${_Sphinx_TOOL} Python script." 
- ) - endif () - mark_as_advanced (Sphinx-${_Sphinx_TOOL}_EXECUTABLE) - list (APPEND _Sphinx_REQUIRED_VARS Sphinx-${_Sphinx_TOOL}_EXECUTABLE) -endforeach () - -# ---------------------------------------------------------------------------- -# determine Python executable used by Sphinx -if (Sphinx-build_EXECUTABLE) - # extract python executable from shebang of sphinx-build - find_package (PythonInterp QUIET) - set (Sphinx_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}") - set (Sphinx_PYTHON_OPTIONS) - file (STRINGS "${Sphinx-build_EXECUTABLE}" FIRST_LINE LIMIT_COUNT 1) - if (FIRST_LINE MATCHES "^#!(.*/python.*)") # does not match "#!/usr/bin/env python" ! - string (REGEX REPLACE "^ +| +$" "" Sphinx_PYTHON_EXECUTABLE "${CMAKE_MATCH_1}") - if (Sphinx_PYTHON_EXECUTABLE MATCHES "([^ ]+) (.*)") - set (Sphinx_PYTHON_EXECUTABLE "${CMAKE_MATCH_1}") - string (REGEX REPLACE " +" ";" Sphinx_PYTHON_OPTIONS "${CMAKE_MATCH_2}") - endif () - endif () - # this is done to avoid problems with multiple Python versions being installed - # remember: CMake command if(STR EQUAL STR) is bad and may cause many troubles ! 
- string (REGEX REPLACE "([.+*?^$])" "\\\\\\1" _Sphinx_PYTHON_EXECUTABLE_RE "${PYTHON_EXECUTABLE}") - list (FIND Sphinx_PYTHON_OPTIONS -E IDX) - if (IDX EQUAL -1 AND NOT Sphinx_PYTHON_EXECUTABLE MATCHES "^${_Sphinx_PYTHON_EXECUTABLE_RE}$") - list (INSERT Sphinx_PYTHON_OPTIONS 0 -E) - endif () - unset (_Sphinx_PYTHON_EXECUTABLE_RE) -endif () - -# ---------------------------------------------------------------------------- -# determine Sphinx version -# some quick experiments by @ploxiln -# - sphinx 1.7 and later have the version output format like "sphinx-build 1.7.2" -# - sphinx 1.2 through 1.6 have the version output format like "Sphinx (sphinx-build) 1.2.2" -# - sphinx 1.1 and before do not have a "--version" flag, but it causes the help output like "-h" does which includes version like "Sphinx v1.0.2" -if (Sphinx-build_EXECUTABLE) - # intentionally use invalid -h option here as the help that is shown then - # will include the Sphinx version information - if (Sphinx_PYTHON_EXECUTABLE) - execute_process ( - COMMAND "${Sphinx_PYTHON_EXECUTABLE}" ${Sphinx_PYTHON_OPTIONS} "${Sphinx-build_EXECUTABLE}" --version - OUTPUT_VARIABLE _Sphinx_VERSION - ERROR_VARIABLE _Sphinx_VERSION - ) - elseif (UNIX) - execute_process ( - COMMAND "${Sphinx-build_EXECUTABLE}" --version - OUTPUT_VARIABLE _Sphinx_VERSION - ERROR_VARIABLE _Sphinx_VERSION - ) - endif () - - # The sphinx version can also contain a "b" instead of the last dot. - # For example "Sphinx v1.2b1" or "Sphinx 1.7.0b2" so we cannot just split on "." 
- if (_Sphinx_VERSION MATCHES "sphinx-build ([0-9]+\\.[0-9]+(\\.|a?|b?)([0-9]*)(b?)([0-9]*))") - set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") - set (_SPHINX_VERSION_FOUND) - elseif (_Sphinx_VERSION MATCHES "Sphinx v([0-9]+\\.[0-9]+(\\.|b?)([0-9]*)(b?)([0-9]*))") - set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") - set (_SPHINX_VERSION_FOUND) - elseif (_Sphinx_VERSION MATCHES "Sphinx \\(sphinx-build\\) ([0-9]+\\.[0-9]+(\\.|a?|b?)([0-9]*)(b?)([0-9]*))") - set (Sphinx_VERSION_STRING "${CMAKE_MATCH_1}") - set (_SPHINX_VERSION_FOUND) - endif () -endif () - -if(_SPHINX_VERSION_FOUND) - string(REGEX REPLACE "([0-9]+)\\.[0-9]+(\\.|b)[0-9]+" "\\1" Sphinx_VERSION_MAJOR ${Sphinx_VERSION_STRING}) - string(REGEX REPLACE "[0-9]+\\.([0-9]+)(\\.|b)[0-9]+" "\\1" Sphinx_VERSION_MINOR ${Sphinx_VERSION_STRING}) - string(REGEX REPLACE "[0-9]+\\.[0-9]+(\\.|b)([0-9]+)" "\\1" Sphinx_VERSION_PATCH ${Sphinx_VERSION_STRING}) - - # v1.2.0 -> v1.2 - if (Sphinx_VERSION_PATCH EQUAL 0) - string (REGEX REPLACE "\\.0$" "" Sphinx_VERSION_STRING "${Sphinx_VERSION_STRING}") - endif () -endif () - -# ---------------------------------------------------------------------------- -# compatibility with FindPythonInterp.cmake and FindPerl.cmake -set (SPHINX_EXECUTABLE "${Sphinx-build_EXECUTABLE}") - -# ---------------------------------------------------------------------------- -# handle the QUIETLY and REQUIRED arguments and set SPHINX_FOUND to TRUE if -# all listed variables are TRUE -include (FindPackageHandleStandardArgs) -FIND_PACKAGE_HANDLE_STANDARD_ARGS ( - Sphinx - REQUIRED_VARS - ${_Sphinx_REQUIRED_VARS} -# VERSION_VAR # This isn't available until CMake 2.8.8 so don't use it. - Sphinx_VERSION_STRING -) - -# ---------------------------------------------------------------------------- -# set Sphinx_DIR -if (NOT Sphinx_DIR AND Sphinx-build_EXECUTABLE) - get_filename_component (Sphinx_DIR "${Sphinx-build_EXECUTABLE}" PATH) - string (REGEX REPLACE "/bin/?" 
"" Sphinx_DIR "${Sphinx_DIR}") - set (Sphinx_DIR "${Sphinx_DIR}" CACHE PATH "Installation directory of Sphinx tools." FORCE) -endif () - -unset (_Sphinx_VERSION) -unset (_Sphinx_REQUIRED_VARS) \ No newline at end of file diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in b/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in deleted file mode 100644 index abd6793..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/janssonConfig.cmake.in +++ /dev/null @@ -1,4 +0,0 @@ -@PACKAGE_INIT@ - -include("${CMAKE_CURRENT_LIST_DIR}/janssonTargets.cmake") -check_required_components("@PROJECT_NAME@") diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake deleted file mode 100644 index 2f248cb..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_config.h.cmake +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2010-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - * - * - * This file specifies a part of the site-specific configuration for - * Jansson, namely those things that affect the public API in - * jansson.h. - * - * The CMake system will generate the jansson_config.h file and - * copy it to the build and install directories. - */ - -#ifndef JANSSON_CONFIG_H -#define JANSSON_CONFIG_H - -/* Define this so that we can disable scattered automake configuration in source files */ -#ifndef JANSSON_USING_CMAKE -#define JANSSON_USING_CMAKE -#endif - -/* Note: when using cmake, JSON_INTEGER_IS_LONG_LONG is not defined nor used, - * as we will also check for __int64 etc types. 
- * (the definition was used in the automake system) */ - -/* Bring in the cmake-detected defines */ -#cmakedefine HAVE_STDINT_H 1 -#cmakedefine HAVE_INTTYPES_H 1 -#cmakedefine HAVE_SYS_TYPES_H 1 - -/* Include our standard type header for the integer typedef */ - -#if defined(HAVE_STDINT_H) -# include -#elif defined(HAVE_INTTYPES_H) -# include -#elif defined(HAVE_SYS_TYPES_H) -# include -#endif - - -/* If your compiler supports the inline keyword in C, JSON_INLINE is - defined to `inline', otherwise empty. In C++, the inline is always - supported. */ -#ifdef __cplusplus -#define JSON_INLINE inline -#else -#define JSON_INLINE @JSON_INLINE@ -#endif - - -#define json_int_t @JSON_INT_T@ -#define json_strtoint @JSON_STRTOINT@ -#define JSON_INTEGER_FORMAT @JSON_INTEGER_FORMAT@ - - -/* If locale.h and localeconv() are available, define to 1, otherwise to 0. */ -#define JSON_HAVE_LOCALECONV @JSON_HAVE_LOCALECONV@ - -/* If __atomic builtins are available they will be used to manage - reference counts of json_t. */ -#define JSON_HAVE_ATOMIC_BUILTINS @JSON_HAVE_ATOMIC_BUILTINS@ - -/* If __atomic builtins are not available we try using __sync builtins - to manage reference counts of json_t. */ -#define JSON_HAVE_SYNC_BUILTINS @JSON_HAVE_SYNC_BUILTINS@ - -/* Maximum recursion depth for parsing JSON input. - This limits the depth of e.g. array-within-array constructions. 
*/ -#define JSON_PARSER_MAX_DEPTH 2048 - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake b/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake deleted file mode 100644 index b7c4514..0000000 --- a/solo-ckpool-source/src/jansson-2.14/cmake/jansson_private_config.h.cmake +++ /dev/null @@ -1,53 +0,0 @@ -#cmakedefine HAVE_ENDIAN_H 1 -#cmakedefine HAVE_FCNTL_H 1 -#cmakedefine HAVE_SCHED_H 1 -#cmakedefine HAVE_UNISTD_H 1 -#cmakedefine HAVE_SYS_PARAM_H 1 -#cmakedefine HAVE_SYS_STAT_H 1 -#cmakedefine HAVE_SYS_TIME_H 1 -#cmakedefine HAVE_SYS_TYPES_H 1 -#cmakedefine HAVE_STDINT_H 1 - -#cmakedefine HAVE_CLOSE 1 -#cmakedefine HAVE_GETPID 1 -#cmakedefine HAVE_GETTIMEOFDAY 1 -#cmakedefine HAVE_OPEN 1 -#cmakedefine HAVE_READ 1 -#cmakedefine HAVE_SCHED_YIELD 1 - -#cmakedefine HAVE_SYNC_BUILTINS 1 -#cmakedefine HAVE_ATOMIC_BUILTINS 1 - -#cmakedefine HAVE_LOCALE_H 1 -#cmakedefine HAVE_SETLOCALE 1 - -#cmakedefine HAVE_INT32_T 1 -#ifndef HAVE_INT32_T -# define int32_t @JSON_INT32@ -#endif - -#cmakedefine HAVE_UINT32_T 1 -#ifndef HAVE_UINT32_T -# define uint32_t @JSON_UINT32@ -#endif - -#cmakedefine HAVE_UINT16_T 1 -#ifndef HAVE_UINT16_T -# define uint16_t @JSON_UINT16@ -#endif - -#cmakedefine HAVE_UINT8_T 1 -#ifndef HAVE_UINT8_T -# define uint8_t @JSON_UINT8@ -#endif - -#cmakedefine HAVE_SSIZE_T 1 - -#ifndef HAVE_SSIZE_T -# define ssize_t @JSON_SSIZE@ -#endif - -#cmakedefine USE_URANDOM 1 -#cmakedefine USE_WINDOWS_CRYPTOAPI 1 - -#define INITIAL_HASHTABLE_ORDER @JANSSON_INITIAL_HASHTABLE_ORDER@ diff --git a/solo-ckpool-source/src/jansson-2.14/configure.ac b/solo-ckpool-source/src/jansson-2.14/configure.ac deleted file mode 100644 index bcca185..0000000 --- a/solo-ckpool-source/src/jansson-2.14/configure.ac +++ /dev/null @@ -1,168 +0,0 @@ -AC_PREREQ([2.71]) -AC_INIT([jansson],[2.14],[https://github.com/akheron/jansson/issues]) - -AC_CONFIG_AUX_DIR([.]) -AM_INIT_AUTOMAKE([1.10 foreign]) - -AC_CONFIG_SRCDIR([src/value.c]) 
-AC_CONFIG_HEADERS([jansson_private_config.h]) -AC_CONFIG_MACRO_DIRS([m4]) - -# Checks for programs. -AC_PROG_CC -AC_PROG_CXX -LT_INIT -AM_CONDITIONAL([GCC], [test x$GCC = xyes]) - -# Checks for libraries. - -# Checks for header files. -AC_CHECK_HEADERS([endian.h fcntl.h locale.h sched.h unistd.h sys/param.h sys/stat.h sys/time.h sys/types.h]) - -# Checks for typedefs, structures, and compiler characteristics. -AC_TYPE_INT32_T -AC_TYPE_UINT32_T -AC_TYPE_UINT16_T -AC_TYPE_UINT8_T -AC_TYPE_LONG_LONG_INT - -AC_C_INLINE -case $ac_cv_c_inline in - yes) json_inline=inline;; - no) json_inline=;; - *) json_inline=$ac_cv_c_inline;; -esac -AC_SUBST([json_inline]) - -# Checks for library functions. -AC_CHECK_FUNCS([close getpid gettimeofday localeconv open read sched_yield strtoll]) - -AC_MSG_CHECKING([for gcc __sync builtins]) -have_sync_builtins=no -AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[unsigned long val; __sync_bool_compare_and_swap(&val, 0, 1); __sync_add_and_fetch(&val, 1); __sync_sub_and_fetch(&val, 1);]])],[have_sync_builtins=yes],[]) -if test "x$have_sync_builtins" = "xyes"; then - AC_DEFINE([HAVE_SYNC_BUILTINS], [1], - [Define to 1 if gcc's __sync builtins are available]) - json_have_sync_builtins=1 -else - json_have_sync_builtins=0 -fi -AC_SUBST([json_have_sync_builtins]) -AC_MSG_RESULT([$have_sync_builtins]) - -AC_MSG_CHECKING([for gcc __atomic builtins]) -have_atomic_builtins=no -AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[char l; unsigned long v; __atomic_test_and_set(&l, __ATOMIC_RELAXED); __atomic_store_n(&v, 1, __ATOMIC_RELEASE); __atomic_load_n(&v, __ATOMIC_ACQUIRE); __atomic_add_fetch(&v, 1, __ATOMIC_ACQUIRE); __atomic_sub_fetch(&v, 1, __ATOMIC_RELEASE);]])],[have_atomic_builtins=yes],[]) -if test "x$have_atomic_builtins" = "xyes"; then - AC_DEFINE([HAVE_ATOMIC_BUILTINS], [1], - [Define to 1 if gcc's __atomic builtins are available]) - json_have_atomic_builtins=1 -else - json_have_atomic_builtins=0 -fi -AC_SUBST([json_have_atomic_builtins]) 
-AC_MSG_RESULT([$have_atomic_builtins]) - -case "$ac_cv_type_long_long_int$ac_cv_func_strtoll" in - yesyes) json_have_long_long=1;; - *) json_have_long_long=0;; -esac -AC_SUBST([json_have_long_long]) - -case "$ac_cv_header_locale_h$ac_cv_func_localeconv" in - yesyes) json_have_localeconv=1;; - *) json_have_localeconv=0;; -esac -AC_SUBST([json_have_localeconv]) - -# Features -AC_ARG_ENABLE([urandom], - [AS_HELP_STRING([--disable-urandom], - [Don't use /dev/urandom to seed the hash function])], - [use_urandom=$enableval], [use_urandom=yes]) - -if test "x$use_urandom" = xyes; then -AC_DEFINE([USE_URANDOM], [1], - [Define to 1 if /dev/urandom should be used for seeding the hash function]) -fi - -AC_ARG_ENABLE([windows-cryptoapi], - [AS_HELP_STRING([--disable-windows-cryptoapi], - [Don't use CryptGenRandom to seed the hash function])], - [use_windows_cryptoapi=$enableval], [use_windows_cryptoapi=yes]) - -if test "x$use_windows_cryptoapi" = xyes; then -AC_DEFINE([USE_WINDOWS_CRYPTOAPI], [1], - [Define to 1 if CryptGenRandom should be used for seeding the hash function]) -fi - -AC_ARG_ENABLE([initial-hashtable-order], - [AS_HELP_STRING([--enable-initial-hashtable-order=VAL], - [Number of buckets new object hashtables contain is 2 raised to this power. The default is 3, so empty hashtables contain 2^3 = 8 buckets.])], - [initial_hashtable_order=$enableval], [initial_hashtable_order=3]) -AC_DEFINE_UNQUOTED([INITIAL_HASHTABLE_ORDER], [$initial_hashtable_order], - [Number of buckets new object hashtables contain is 2 raised to this power. E.g. 
3 -> 2^3 = 8.]) - -AC_ARG_ENABLE([Bsymbolic], - [AS_HELP_STRING([--disable-Bsymbolic], - [Avoid linking with -Bsymbolic-function])], - [], [with_Bsymbolic=check]) - -if test "x$with_Bsymbolic" != "xno" ; then - AC_MSG_CHECKING([for -Bsymbolic-functions linker flag]) - saved_LDFLAGS="${LDFLAGS}" - LDFLAGS=-Wl,-Bsymbolic-functions - AC_LINK_IFELSE([AC_LANG_PROGRAM([[]], [[int main (void) { return 0; }]])],[AC_MSG_RESULT([yes]) - have_Bsymbolic=yes],[AC_MSG_RESULT([no]) - have_Bsymbolic=no - ]) - LDFLAGS="${saved_LDFLAGS}" - - if test "x$with_Bsymbolic" = "xcheck" ; then - with_Bsymbolic=$have_Bsymbolic; - fi - if test "x$with_Bsymbolic:x$have_Bsymbolic" = "xyes:xno" ; then - AC_MSG_ERROR([linker support is required for -Bsymbolic]) - fi -fi - -AS_IF([test "x$with_Bsymbolic" = "xyes"], [JSON_BSYMBOLIC_LDFLAGS=-Wl[,]-Bsymbolic-functions]) -AC_SUBST(JSON_BSYMBOLIC_LDFLAGS) - -# Enable symbol versioning on GNU libc -JSON_SYMVER_LDFLAGS= -AC_CHECK_DECL([__GLIBC__], [JSON_SYMVER_LDFLAGS=-Wl,--default-symver]) -AC_SUBST([JSON_SYMVER_LDFLAGS]) - -AC_ARG_ENABLE([ossfuzzers], - [AS_HELP_STRING([--enable-ossfuzzers], - [Whether to generate the fuzzers for OSS-Fuzz])], - [have_ossfuzzers=yes], [have_ossfuzzers=no]) -AM_CONDITIONAL([USE_OSSFUZZERS], [test "x$have_ossfuzzers" = "xyes"]) - - -AC_SUBST([LIB_FUZZING_ENGINE]) -AM_CONDITIONAL([USE_OSSFUZZ_FLAG], [test "x$LIB_FUZZING_ENGINE" = "x-fsanitize=fuzzer"]) -AM_CONDITIONAL([USE_OSSFUZZ_STATIC], [test -f "$LIB_FUZZING_ENGINE"]) - - -if test x$GCC = xyes; then - AC_MSG_CHECKING(for -Wno-format-truncation) - wnoformat_truncation="-Wno-format-truncation" - AS_IF([${CC} -Wno-format-truncation -Werror -S -o /dev/null -xc /dev/null > /dev/null 2>&1], - [AC_MSG_RESULT(yes)], - [AC_MSG_RESULT(no) - wnoformat_truncation=""]) - - AM_CFLAGS="-Wall -Wextra -Wdeclaration-after-statement -Wshadow ${wnoformat_truncation}" -fi -AC_SUBST([AM_CFLAGS]) - -AC_CONFIG_FILES([ - jansson.pc - Makefile - doc/Makefile - src/Makefile - 
src/jansson_config.h -]) -AC_OUTPUT diff --git a/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am b/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am deleted file mode 100644 index 8186a7d..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/Makefile.am +++ /dev/null @@ -1,20 +0,0 @@ -EXTRA_DIST = conf.py apiref.rst changes.rst conformance.rst \ - gettingstarted.rst github_commits.c index.rst threadsafety.rst \ - tutorial.rst upgrading.rst ext/refcounting.py - -SPHINXBUILD = sphinx-build -SPHINXOPTS = -d _build/doctrees $(SPHINXOPTS_EXTRA) - -html-local: - $(SPHINXBUILD) -b html $(SPHINXOPTS) $(srcdir) _build/html - -install-html-local: html - mkdir -p $(DESTDIR)$(htmldir) - cp -r _build/html $(DESTDIR)$(htmldir) - -uninstall-local: - rm -rf $(DESTDIR)$(htmldir) - -clean-local: - rm -rf _build - rm -f ext/refcounting.pyc diff --git a/solo-ckpool-source/src/jansson-2.14/doc/README b/solo-ckpool-source/src/jansson-2.14/doc/README deleted file mode 100644 index 930b3bf..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/README +++ /dev/null @@ -1,5 +0,0 @@ -To build the documentation, invoke - - make html - -Then point your browser to _build/html/index.html. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst b/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst deleted file mode 100644 index 4bfb687..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/apiref.rst +++ /dev/null @@ -1,2064 +0,0 @@ -.. _apiref: - -************* -API Reference -************* - -.. highlight:: c - -Preliminaries -============= - -All declarations are in :file:`jansson.h`, so it's enough to - -:: - - #include - -in each source file. - -All constants are prefixed with ``JSON_`` (except for those describing -the library version, prefixed with ``JANSSON_``). Other identifiers -are prefixed with ``json_``. Type names are suffixed with ``_t`` and -``typedef``\ 'd so that the ``struct`` keyword need not be used. 
- - -Library Version -=============== - -The Jansson version is of the form *A.B.C*, where *A* is the major -version, *B* is the minor version and *C* is the micro version. If the -micro version is zero, it's omitted from the version string, i.e. the -version string is just *A.B*. - -When a new release only fixes bugs and doesn't add new features or -functionality, the micro version is incremented. When new features are -added in a backwards compatible way, the minor version is incremented -and the micro version is set to zero. When there are backwards -incompatible changes, the major version is incremented and others are -set to zero. - -The following preprocessor constants specify the current version of -the library: - -``JANSSON_MAJOR_VERSION``, ``JANSSON_MINOR_VERSION``, ``JANSSON_MICRO_VERSION`` - Integers specifying the major, minor and micro versions, - respectively. - -``JANSSON_VERSION`` - A string representation of the current version, e.g. ``"1.2.1"`` or - ``"1.3"``. - -``JANSSON_VERSION_HEX`` - A 3-byte hexadecimal representation of the version, e.g. - ``0x010201`` for version 1.2.1 and ``0x010300`` for version 1.3. - This is useful in numeric comparisons, e.g.:: - - #if JANSSON_VERSION_HEX >= 0x010300 - /* Code specific to version 1.3 and above */ - #endif - -Additionally, there are functions to determine the version of Jansson at -runtime: - -.. function:: const char *jansson_version_str() - - Return the version of the Jansson library, in the same format as - the ``JANSSON_VERSION`` preprocessor constant. - - .. versionadded:: 2.13 - -.. function:: int jansson_version_cmp(int major, int minor, int micro) - - Returns an integer less than, equal to, or greater than zero if - the runtime version of Jansson is found, respectively, to be less - than, to match, or be greater than the provided *major*, *minor*, and - *micro*. - - .. 
versionadded:: 2.13 - -``JANSSON_THREAD_SAFE_REFCOUNT`` - If this value is defined all read-only operations and reference counting in - Jansson are thread safe. This value is not defined for versions older than - ``2.11`` or when the compiler does not provide built-in atomic functions. - - -Value Representation -==================== - -The JSON specification (:rfc:`4627`) defines the following data types: -*object*, *array*, *string*, *number*, *boolean*, and *null*. JSON -types are used dynamically; arrays and objects can hold any other data -type, including themselves. For this reason, Jansson's type system is -also dynamic in nature. There's one C type to represent all JSON -values, and this structure knows the type of the JSON value it holds. - -.. type:: json_t - - This data structure is used throughout the library to represent all - JSON values. It always contains the type of the JSON value it holds - and the value's reference count. The rest depends on the type of the - value. - -Objects of :type:`json_t` are always used through a pointer. There -are APIs for querying the type, manipulating the reference count, and -for constructing and manipulating values of different types. - -Unless noted otherwise, all API functions return an error value if an -error occurs. Depending on the function's signature, the error value -is either *NULL* or -1. Invalid arguments or invalid input are -apparent sources for errors. Memory allocation and I/O operations may -also cause errors. - - -Type ----- - -.. c:enum:: json_type - - The type of a JSON value. 
The following members are defined: - - +--------------------+ - | ``JSON_OBJECT`` | - +--------------------+ - | ``JSON_ARRAY`` | - +--------------------+ - | ``JSON_STRING`` | - +--------------------+ - | ``JSON_INTEGER`` | - +--------------------+ - | ``JSON_REAL`` | - +--------------------+ - | ``JSON_TRUE`` | - +--------------------+ - | ``JSON_FALSE`` | - +--------------------+ - | ``JSON_NULL`` | - +--------------------+ - - These correspond to JSON object, array, string, number, boolean and - null. A number is represented by either a value of the type - ``JSON_INTEGER`` or of the type ``JSON_REAL``. A true boolean value - is represented by a value of the type ``JSON_TRUE`` and false by a - value of the type ``JSON_FALSE``. - -.. function:: int json_typeof(const json_t *json) - - Return the type of the JSON value (a :type:`json_type` cast to - ``int``). *json* MUST NOT be *NULL*. This function is actually - implemented as a macro for speed. - -.. function:: int json_is_object(const json_t *json) - int json_is_array(const json_t *json) - int json_is_string(const json_t *json) - int json_is_integer(const json_t *json) - int json_is_real(const json_t *json) - int json_is_true(const json_t *json) - int json_is_false(const json_t *json) - int json_is_null(const json_t *json) - - These functions (actually macros) return true (non-zero) for values - of the given type, and false (zero) for values of other types and - for *NULL*. - -.. function:: int json_is_number(const json_t *json) - - Returns true for values of types ``JSON_INTEGER`` and - ``JSON_REAL``, and false for other types and for *NULL*. - -.. function:: int json_is_boolean(const json_t *json) - - Returns true for types ``JSON_TRUE`` and ``JSON_FALSE``, and false - for values of other types and for *NULL*. - -.. function:: int json_boolean_value(const json_t *json) - - Alias of :func:`json_is_true()`, i.e. returns 1 for ``JSON_TRUE`` - and 0 otherwise. - - .. versionadded:: 2.7 - - -.. 
_apiref-reference-count: - -Reference Count ---------------- - -The reference count is used to track whether a value is still in use -or not. When a value is created, it's reference count is set to 1. If -a reference to a value is kept (e.g. a value is stored somewhere for -later use), its reference count is incremented, and when the value is -no longer needed, the reference count is decremented. When the -reference count drops to zero, there are no references left, and the -value can be destroyed. - -.. function:: json_t *json_incref(json_t *json) - - Increment the reference count of *json* if it's not *NULL*. - Returns *json*. - -.. function:: void json_decref(json_t *json) - - Decrement the reference count of *json*. As soon as a call to - :func:`json_decref()` drops the reference count to zero, the value - is destroyed and it can no longer be used. - -Functions creating new JSON values set the reference count to 1. These -functions are said to return a **new reference**. Other functions -returning (existing) JSON values do not normally increase the -reference count. These functions are said to return a **borrowed -reference**. So, if the user will hold a reference to a value returned -as a borrowed reference, he must call :func:`json_incref`. As soon as -the value is no longer needed, :func:`json_decref` should be called -to release the reference. - -Normally, all functions accepting a JSON value as an argument will -manage the reference, i.e. increase and decrease the reference count -as needed. However, some functions **steal** the reference, i.e. they -have the same result as if the user called :func:`json_decref()` on -the argument right after calling the function. These functions are -suffixed with ``_new`` or have ``_new_`` somewhere in their name. 
- -For example, the following code creates a new JSON array and appends -an integer to it:: - - json_t *array, *integer; - - array = json_array(); - integer = json_integer(42); - - json_array_append(array, integer); - json_decref(integer); - -Note how the caller has to release the reference to the integer value -by calling :func:`json_decref()`. By using a reference stealing -function :func:`json_array_append_new()` instead of -:func:`json_array_append()`, the code becomes much simpler:: - - json_t *array = json_array(); - json_array_append_new(array, json_integer(42)); - -In this case, the user doesn't have to explicitly release the -reference to the integer value, as :func:`json_array_append_new()` -steals the reference when appending the value to the array. - -In the following sections it is clearly documented whether a function -will return a new or borrowed reference or steal a reference to its -argument. - - -Circular References -------------------- - -A circular reference is created when an object or an array is, -directly or indirectly, inserted inside itself. The direct case is -simple:: - - json_t *obj = json_object(); - json_object_set(obj, "foo", obj); - -Jansson will refuse to do this, and :func:`json_object_set()` (and -all the other such functions for objects and arrays) will return with -an error status. The indirect case is the dangerous one:: - - json_t *arr1 = json_array(), *arr2 = json_array(); - json_array_append(arr1, arr2); - json_array_append(arr2, arr1); - -In this example, the array ``arr2`` is contained in the array -``arr1``, and vice versa. Jansson cannot check for this kind of -indirect circular references without a performance hit, so it's up to -the user to avoid them. - -If a circular reference is created, the memory consumed by the values -cannot be freed by :func:`json_decref()`. The reference counts never -drops to zero because the values are keeping the references to each -other. 
Moreover, trying to encode the values with any of the encoding -functions will fail. The encoder detects circular references and -returns an error status. - -Scope Dereferencing -------------------- - -.. versionadded:: 2.9 - -It is possible to use the ``json_auto_t`` type to automatically -dereference a value at the end of a scope. For example:: - - void function(void) { - json_auto_t *value = NULL; - value = json_string("foo"); - /* json_decref(value) is automatically called. */ - } - -This feature is only available on GCC and Clang. So if your project -has a portability requirement for other compilers, you should avoid -this feature. - -Additionally, as always, care should be taken when passing values to -functions that steal references. - -True, False and Null -==================== - -These three values are implemented as singletons, so the returned -pointers won't change between invocations of these functions. - -.. function:: json_t *json_true(void) - - .. refcounting:: new - - Returns the JSON true value. - -.. function:: json_t *json_false(void) - - .. refcounting:: new - - Returns the JSON false value. - -.. function:: json_t *json_boolean(val) - - .. refcounting:: new - - Returns JSON false if ``val`` is zero, and JSON true otherwise. - This is a macro, and equivalent to ``val ? json_true() : - json_false()``. - - .. versionadded:: 2.4 - - -.. function:: json_t *json_null(void) - - .. refcounting:: new - - Returns the JSON null value. - - -String -====== - -Jansson uses UTF-8 as the character encoding. All JSON strings must be -valid UTF-8 (or ASCII, as it's a subset of UTF-8). All Unicode -codepoints U+0000 through U+10FFFF are allowed, but you must use -length-aware functions if you wish to embed null bytes in strings. - -.. function:: json_t *json_string(const char *value) - - .. refcounting:: new - - Returns a new JSON string, or *NULL* on error. *value* must be a - valid null terminated UTF-8 encoded Unicode string. - -.. 
function:: json_t *json_stringn(const char *value, size_t len) - - .. refcounting:: new - - Like :func:`json_string`, but with explicit length, so *value* may - contain null characters or not be null terminated. - - .. versionadded:: 2.7 - -.. function:: json_t *json_string_nocheck(const char *value) - - .. refcounting:: new - - Like :func:`json_string`, but doesn't check that *value* is valid - UTF-8. Use this function only if you are certain that this really - is the case (e.g. you have already checked it by other means). - -.. function:: json_t *json_stringn_nocheck(const char *value, size_t len) - - .. refcounting:: new - - Like :func:`json_string_nocheck`, but with explicit length, so - *value* may contain null characters or not be null terminated. - - .. versionadded:: 2.7 - -.. function:: const char *json_string_value(const json_t *string) - - Returns the associated value of *string* as a null terminated UTF-8 - encoded string, or *NULL* if *string* is not a JSON string. - - The returned value is read-only and must not be modified or freed by - the user. It is valid as long as *string* exists, i.e. as long as - its reference count has not dropped to zero. - -.. function:: size_t json_string_length(const json_t *string) - - Returns the length of *string* in its UTF-8 presentation, or zero - if *string* is not a JSON string. - - .. versionadded:: 2.7 - -.. function:: int json_string_set(json_t *string, const char *value) - - Sets the associated value of *string* to *value*. *value* must be a - valid UTF-8 encoded Unicode string. Returns 0 on success and -1 on - error. - -.. function:: int json_string_setn(json_t *string, const char *value, size_t len) - - Like :func:`json_string_set`, but with explicit length, so *value* - may contain null characters or not be null terminated. - - .. versionadded:: 2.7 - -.. 
function:: int json_string_set_nocheck(json_t *string, const char *value) - - Like :func:`json_string_set`, but doesn't check that *value* is - valid UTF-8. Use this function only if you are certain that this - really is the case (e.g. you have already checked it by other - means). - -.. function:: int json_string_setn_nocheck(json_t *string, const char *value, size_t len) - - Like :func:`json_string_set_nocheck`, but with explicit length, - so *value* may contain null characters or not be null terminated. - - .. versionadded:: 2.7 - -.. function:: json_t *json_sprintf(const char *format, ...) - json_t *json_vsprintf(const char *format, va_list ap) - - .. refcounting:: new - - Construct a JSON string from a format string and varargs, just like - :func:`printf()`. - - .. versionadded:: 2.11 - - -Number -====== - -The JSON specification only contains one numeric type, "number". The C -programming language has distinct types for integer and floating-point -numbers, so for practical reasons Jansson also has distinct types for -the two. They are called "integer" and "real", respectively. For more -information, see :ref:`rfc-conformance`. - -.. type:: json_int_t - - This is the C type that is used to store JSON integer values. It - represents the widest integer type available on your system. In - practice it's just a typedef of ``long long`` if your compiler - supports it, otherwise ``long``. - - Usually, you can safely use plain ``int`` in place of - ``json_int_t``, and the implicit C integer conversion handles the - rest. Only when you know that you need the full 64-bit range, you - should use ``json_int_t`` explicitly. - -``JSON_INTEGER_IS_LONG_LONG`` - This is a preprocessor variable that holds the value 1 if - :type:`json_int_t` is ``long long``, and 0 if it's ``long``. 
It - can be used as follows:: - - #if JSON_INTEGER_IS_LONG_LONG - /* Code specific for long long */ - #else - /* Code specific for long */ - #endif - -``JSON_INTEGER_FORMAT`` - This is a macro that expands to a :func:`printf()` conversion - specifier that corresponds to :type:`json_int_t`, without the - leading ``%`` sign, i.e. either ``"lld"`` or ``"ld"``. This macro - is required because the actual type of :type:`json_int_t` can be - either ``long`` or ``long long``, and :func:`printf()` requires - different length modifiers for the two. - - Example:: - - json_int_t x = 123123123; - printf("x is %" JSON_INTEGER_FORMAT "\n", x); - - -.. function:: json_t *json_integer(json_int_t value) - - .. refcounting:: new - - Returns a new JSON integer, or *NULL* on error. - -.. function:: json_int_t json_integer_value(const json_t *integer) - - Returns the associated value of *integer*, or 0 if *integer* is not a - JSON integer. - -.. function:: int json_integer_set(const json_t *integer, json_int_t value) - - Sets the associated value of *integer* to *value*. Returns 0 on - success and -1 if *integer* is not a JSON integer. - -.. function:: json_t *json_real(double value) - - .. refcounting:: new - - Returns a new JSON real, or *NULL* on error. - -.. function:: double json_real_value(const json_t *real) - - Returns the associated value of *real*, or 0.0 if *real* is not a - JSON real. - -.. function:: int json_real_set(const json_t *real, double value) - - Sets the associated value of *real* to *value*. Returns 0 on - success and -1 if *real* is not a JSON real. - -.. function:: double json_number_value(const json_t *json) - - Returns the associated value of the JSON integer or JSON real - *json*, cast to double regardless of the actual type. If *json* is - neither JSON real nor JSON integer, 0.0 is returned. - - -Array -===== - -A JSON array is an ordered collection of other JSON values. - -.. function:: json_t *json_array(void) - - .. 
refcounting:: new - - Returns a new JSON array, or *NULL* on error. Initially, the array - is empty. - -.. function:: size_t json_array_size(const json_t *array) - - Returns the number of elements in *array*, or 0 if *array* is NULL - or not a JSON array. - -.. function:: json_t *json_array_get(const json_t *array, size_t index) - - .. refcounting:: borrow - - Returns the element in *array* at position *index*. The valid range - for *index* is from 0 to the return value of - :func:`json_array_size()` minus 1. If *array* is not a JSON array, - if *array* is *NULL*, or if *index* is out of range, *NULL* is - returned. - -.. function:: int json_array_set(json_t *array, size_t index, json_t *value) - - Replaces the element in *array* at position *index* with *value*. - The valid range for *index* is from 0 to the return value of - :func:`json_array_size()` minus 1. Returns 0 on success and -1 on - error. - -.. function:: int json_array_set_new(json_t *array, size_t index, json_t *value) - - Like :func:`json_array_set()` but steals the reference to *value*. - This is useful when *value* is newly created and not used after - the call. - -.. function:: int json_array_append(json_t *array, json_t *value) - - Appends *value* to the end of *array*, growing the size of *array* - by 1. Returns 0 on success and -1 on error. - -.. function:: int json_array_append_new(json_t *array, json_t *value) - - Like :func:`json_array_append()` but steals the reference to - *value*. This is useful when *value* is newly created and not used - after the call. - -.. function:: int json_array_insert(json_t *array, size_t index, json_t *value) - - Inserts *value* to *array* at position *index*, shifting the - elements at *index* and after it one position towards the end of - the array. Returns 0 on success and -1 on error. - -.. function:: int json_array_insert_new(json_t *array, size_t index, json_t *value) - - Like :func:`json_array_insert()` but steals the reference to - *value*. 
This is useful when *value* is newly created and not used - after the call. - -.. function:: int json_array_remove(json_t *array, size_t index) - - Removes the element in *array* at position *index*, shifting the - elements after *index* one position towards the start of the array. - Returns 0 on success and -1 on error. The reference count of the - removed value is decremented. - -.. function:: int json_array_clear(json_t *array) - - Removes all elements from *array*. Returns 0 on success and -1 on - error. The reference count of all removed values are decremented. - -.. function:: int json_array_extend(json_t *array, json_t *other_array) - - Appends all elements in *other_array* to the end of *array*. - Returns 0 on success and -1 on error. - -.. function:: void json_array_foreach(array, index, value) - - Iterate over every element of ``array``, running the block - of code that follows each time with the proper values set to - variables ``index`` and ``value``, of types :type:`size_t` and - :type:`json_t` pointer respectively. Example:: - - /* array is a JSON array */ - size_t index; - json_t *value; - - json_array_foreach(array, index, value) { - /* block of code that uses index and value */ - } - - The items are returned in increasing index order. - - This macro expands to an ordinary ``for`` statement upon - preprocessing, so its performance is equivalent to that of - hand-written code using the array access functions. - The main advantage of this macro is that it abstracts - away the complexity, and makes for more concise and readable code. - - .. versionadded:: 2.5 - - -Object -====== - -A JSON object is a dictionary of key-value pairs, where the key is a -Unicode string and the value is any JSON value. - -Even though null bytes are allowed in string values, they are not -allowed in object keys. - -.. function:: json_t *json_object(void) - - .. refcounting:: new - - Returns a new JSON object, or *NULL* on error. Initially, the - object is empty. - -.. 
function:: size_t json_object_size(const json_t *object) - - Returns the number of elements in *object*, or 0 if *object* is not - a JSON object. - -.. function:: json_t *json_object_get(const json_t *object, const char *key) - - .. refcounting:: borrow - - Get a value corresponding to *key* from *object*. Returns *NULL* if - *key* is not found and on error. - -.. function:: json_t *json_object_getn(const json_t *object, const char *key, size_t key_len) - - .. refcounting:: borrow - - Like :func:`json_object_get`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_set(json_t *object, const char *key, json_t *value) - - Set the value of *key* to *value* in *object*. *key* must be a - valid null terminated UTF-8 encoded Unicode string. If there - already is a value for *key*, it is replaced by the new value. - Returns 0 on success and -1 on error. - -.. function:: int json_object_setn(json_t *object, const char *key, size_t key_len, json_t *value) - - Like :func:`json_object_set`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_set_nocheck(json_t *object, const char *key, json_t *value) - - Like :func:`json_object_set`, but doesn't check that *key* is - valid UTF-8. Use this function only if you are certain that this - really is the case (e.g. you have already checked it by other - means). - -.. function:: int json_object_setn_nocheck(json_t *object, const char *key, size_t key_len, json_t *value) - - Like :func:`json_object_set_nocheck`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_set_new(json_t *object, const char *key, json_t *value) - - Like :func:`json_object_set()` but steals the reference to - *value*. 
This is useful when *value* is newly created and not used - after the call. - -.. function:: int json_object_setn_new(json_t *object, const char *key, size_t key_len, json_t *value) - - Like :func:`json_object_set_new`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value) - - Like :func:`json_object_set_new`, but doesn't check that *key* is - valid UTF-8. Use this function only if you are certain that this - really is the case (e.g. you have already checked it by other - means). - -.. function:: int json_object_setn_new_nocheck(json_t *object, const char *key, size_t key_len, json_t *value) - - Like :func:`json_object_set_new_nocheck`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_del(json_t *object, const char *key) - - Delete *key* from *object* if it exists. Returns 0 on success, or - -1 if *key* was not found. The reference count of the removed value - is decremented. - -.. function:: int json_object_deln(json_t *object, const char *key, size_t key_len) - - Like :func:`json_object_del`, but give the fixed-length *key* with length *key_len*. - See :ref:`fixed_length_keys` for details. - - .. versionadded:: 2.14 - -.. function:: int json_object_clear(json_t *object) - - Remove all elements from *object*. Returns 0 on success and -1 if - *object* is not a JSON object. The reference count of all removed - values are decremented. - -.. function:: int json_object_update(json_t *object, json_t *other) - - Update *object* with the key-value pairs from *other*, overwriting - existing keys. Returns 0 on success or -1 on error. - -.. 
function:: int json_object_update_existing(json_t *object, json_t *other) - - Like :func:`json_object_update()`, but only the values of existing - keys are updated. No new keys are created. Returns 0 on success or - -1 on error. - - .. versionadded:: 2.3 - -.. function:: int json_object_update_missing(json_t *object, json_t *other) - - Like :func:`json_object_update()`, but only new keys are created. - The value of any existing key is not changed. Returns 0 on success - or -1 on error. - - .. versionadded:: 2.3 - -.. function:: int json_object_update_new(json_t *object, json_t *other) - - Like :func:`json_object_update()`, but steals the reference to - *other*. This is useful when *other* is newly created and not used - after the call. - -.. function:: int json_object_update_existing_new(json_t *object, json_t *other) - - Like :func:`json_object_update_new()`, but only the values of existing - keys are updated. No new keys are created. Returns 0 on success or - -1 on error. - -.. function:: int json_object_update_missing_new(json_t *object, json_t *other) - - Like :func:`json_object_update_new()`, but only new keys are created. - The value of any existing key is not changed. Returns 0 on success - or -1 on error. - -.. function:: int json_object_update_recursive(json_t *object, json_t *other) - - Like :func:`json_object_update()`, but object values in *other* are - recursively merged with the corresponding values in *object* if they are also - objects, instead of overwriting them. Returns 0 on success or -1 on error. - -.. function:: void json_object_foreach(object, key, value) - - Iterate over every key-value pair of ``object``, running the block - of code that follows each time with the proper values set to - variables ``key`` and ``value``, of types ``const char *`` and - :type:`json_t` pointer respectively. 
Example:: - - /* obj is a JSON object */ - const char *key; - json_t *value; - - json_object_foreach(obj, key, value) { - /* block of code that uses key and value */ - } - - The items are returned in the order they were inserted to the - object. - - **Note:** It's not safe to call ``json_object_del(object, key)`` or ``json_object_deln(object, key, key_len)`` - during iteration. If you need to, use - :func:`json_object_foreach_safe` instead. - - This macro expands to an ordinary ``for`` statement upon - preprocessing, so its performance is equivalent to that of - hand-written iteration code using the object iteration protocol - (see below). The main advantage of this macro is that it abstracts - away the complexity behind iteration, and makes for more concise and - readable code. - - .. versionadded:: 2.3 - - -.. function:: void json_object_foreach_safe(object, tmp, key, value) - - Like :func:`json_object_foreach()`, but it's safe to call - ``json_object_del(object, key)`` or ``json_object_deln(object, key, key_len)`` during iteration. - You need to pass an extra ``void *`` parameter ``tmp`` that is used for temporary storage. - - .. versionadded:: 2.8 - -.. function:: void json_object_keylen_foreach(object, key, key_len, value) - - Like :c:func:`json_object_foreach`, but in *key_len* stored length of the *key*. - Example:: - - /* obj is a JSON object */ - const char *key; - json_t *value; - size_t len; - - json_object_keylen_foreach(obj, key, len, value) { - printf("got key %s with length %zu\n", key, len); - } - - **Note:** It's not safe to call ``json_object_deln(object, key, key_len)`` - during iteration. If you need to, use - :func:`json_object_keylen_foreach_safe` instead. - - .. versionadded:: 2.14 - - -.. function:: void json_object_keylen_foreach_safe(object, tmp, key, key_len, value) - - Like :func:`json_object_keylen_foreach()`, but it's safe to call - ``json_object_deln(object, key, key_len)`` during iteration. 
- You need to pass an extra ``void *`` parameter ``tmp`` that is used for temporary storage. - - .. versionadded:: 2.14 - -The following functions can be used to iterate through all key-value -pairs in an object. The items are returned in the order they were -inserted to the object. - -.. function:: void *json_object_iter(json_t *object) - - Returns an opaque iterator which can be used to iterate over all - key-value pairs in *object*, or *NULL* if *object* is empty. - -.. function:: void *json_object_iter_at(json_t *object, const char *key) - - Like :func:`json_object_iter()`, but returns an iterator to the - key-value pair in *object* whose key is equal to *key*, or NULL if - *key* is not found in *object*. Iterating forward to the end of - *object* only yields all key-value pairs of the object if *key* - happens to be the first key in the underlying hash table. - -.. function:: void *json_object_iter_next(json_t *object, void *iter) - - Returns an iterator pointing to the next key-value pair in *object* - after *iter*, or *NULL* if the whole object has been iterated - through. - -.. function:: const char *json_object_iter_key(void *iter) - - Extract the associated key from *iter*. - -.. function:: size_t json_object_iter_key_len(void *iter) - - Extract the associated key length from *iter*. - - .. versionadded:: 2.14 - -.. function:: json_t *json_object_iter_value(void *iter) - - .. refcounting:: borrow - - Extract the associated value from *iter*. - -.. function:: int json_object_iter_set(json_t *object, void *iter, json_t *value) - - Set the value of the key-value pair in *object*, that is pointed to - by *iter*, to *value*. - -.. function:: int json_object_iter_set_new(json_t *object, void *iter, json_t *value) - - Like :func:`json_object_iter_set()`, but steals the reference to - *value*. This is useful when *value* is newly created and not used - after the call. - -.. 
function:: void *json_object_key_to_iter(const char *key) - - Like :func:`json_object_iter_at()`, but much faster. Only works for - values returned by :func:`json_object_iter_key()`. Using other keys - will lead to segfaults. This function is used internally to - implement :func:`json_object_foreach`. Example:: - - /* obj is a JSON object */ - const char *key; - json_t *value; - - void *iter = json_object_iter(obj); - while(iter) - { - key = json_object_iter_key(iter); - value = json_object_iter_value(iter); - /* use key and value ... */ - iter = json_object_iter_next(obj, iter); - } - - .. versionadded:: 2.3 - -.. function:: void json_object_seed(size_t seed) - - Seed the hash function used in Jansson's hashtable implementation. - The seed is used to randomize the hash function so that an - attacker cannot control its output. - - If *seed* is 0, Jansson generates the seed itself by reading - random data from the operating system's entropy sources. If no - entropy sources are available, falls back to using a combination - of the current timestamp (with microsecond precision if possible) - and the process ID. - - If called at all, this function must be called before any calls to - :func:`json_object()`, either explicit or implicit. If this - function is not called by the user, the first call to - :func:`json_object()` (either explicit or implicit) seeds the hash - function. See :ref:`thread-safety` for notes on thread safety. - - If repeatable results are required, for e.g. unit tests, the hash - function can be "unrandomized" by calling :func:`json_object_seed` - with a constant value on program startup, e.g. - ``json_object_seed(1)``. - - .. versionadded:: 2.6 - - -Error reporting -=============== - -Jansson uses a single struct type to pass error information to the -user. See sections :ref:`apiref-decoding`, :ref:`apiref-pack` and -:ref:`apiref-unpack` for functions that pass error information using -this struct. - -.. type:: json_error_t - - .. 
member:: char text[] - - The error message (in UTF-8), or an empty string if a message is - not available. - - The last byte of this array contains a numeric error code. Use - :func:`json_error_code()` to extract this code. - - .. member:: char source[] - - Source of the error. This can be (a part of) the file name or a - special identifier in angle brackets (e.g. ``<string>``). - - .. member:: int line - - The line number on which the error occurred. - - .. member:: int column - - The column on which the error occurred. Note that this is the - *character column*, not the byte column, i.e. a multibyte UTF-8 - character counts as one column. - - .. member:: int position - - The position in bytes from the start of the input. This is - useful for debugging Unicode encoding problems. - -The normal use of :type:`json_error_t` is to allocate it on the stack, -and pass a pointer to a function. Example:: - - int main() { - json_t *json; - json_error_t error; - - json = json_load_file("/path/to/file.json", 0, &error); - if(!json) { - /* the error variable contains error information */ - } - ... - } - -Also note that if the call succeeded (``json != NULL`` in the above -example), the contents of ``error`` are generally left unspecified. -The decoding functions write to the ``position`` member also on -success. See :ref:`apiref-decoding` for more info. - -All functions also accept *NULL* as the :type:`json_error_t` pointer, -in which case no error information is returned to the caller. - -.. c:enum:: json_error_code - - An enumeration containing numeric error codes. The following errors are - currently defined: - - ``json_error_unknown`` - - Unknown error. This should only be returned for non-erroneous - :type:`json_error_t` structures. - - ``json_error_out_of_memory`` - - The library couldn’t allocate any heap memory. - - ``json_error_stack_overflow`` - - Nesting too deep. - - ``json_error_cannot_open_file`` - - Couldn’t open input file. 
- - ``json_error_invalid_argument`` - - A function argument was invalid. - - ``json_error_invalid_utf8`` - - The input string isn’t valid UTF-8. - - ``json_error_premature_end_of_input`` - - The input ended in the middle of a JSON value. - - ``json_error_end_of_input_expected`` - - There was some text after the end of a JSON value. See the - ``JSON_DISABLE_EOF_CHECK`` flag. - - ``json_error_invalid_syntax`` - - JSON syntax error. - - ``json_error_invalid_format`` - - Invalid format string for packing or unpacking. - - ``json_error_wrong_type`` - - When packing or unpacking, the actual type of a value differed from the - one specified in the format string. - - ``json_error_null_character`` - - A null character was detected in a JSON string. See the - ``JSON_ALLOW_NUL`` flag. - - ``json_error_null_value`` - - When packing or unpacking, some key or value was ``NULL``. - - ``json_error_null_byte_in_key`` - - An object key would contain a null byte. Jansson can’t represent such - keys; see :ref:`rfc-conformance`. - - ``json_error_duplicate_key`` - - Duplicate key in object. See the ``JSON_REJECT_DUPLICATES`` flag. - - ``json_error_numeric_overflow`` - - When converting a JSON number to a C numeric type, a numeric overflow - was detected. - - ``json_error_item_not_found`` - - Key in object not found. - - ``json_error_index_out_of_range`` - - Array index is out of range. - - .. versionadded:: 2.11 - -.. function:: enum json_error_code json_error_code(const json_error_t *error) - - Returns the error code embedded in ``error->text``. - - .. versionadded:: 2.11 - - -Encoding -======== - -This section describes the functions that can be used to encode -values to JSON. By default, only objects and arrays can be encoded -directly, since they are the only valid *root* values of a JSON text. -To encode any JSON value, use the ``JSON_ENCODE_ANY`` flag (see -below). 
- -By default, the output has no newlines, and spaces are used between -array and object elements for a readable output. This behavior can be -altered by using the ``JSON_INDENT`` and ``JSON_COMPACT`` flags -described below. A newline is never appended to the end of the encoded -JSON data. - -Each function takes a *flags* parameter that controls some aspects of -how the data is encoded. Its default value is 0. The following macros -can be ORed together to obtain *flags*. - -``JSON_INDENT(n)`` - Pretty-print the result, using newlines between array and object - items, and indenting with *n* spaces. The valid range for *n* is - between 0 and 31 (inclusive), other values result in an undefined - output. If ``JSON_INDENT`` is not used or *n* is 0, no newlines are - inserted between array and object items. - - The ``JSON_MAX_INDENT`` constant defines the maximum indentation - that can be used, and its value is 31. - - .. versionchanged:: 2.7 - Added ``JSON_MAX_INDENT``. - -``JSON_COMPACT`` - This flag enables a compact representation, i.e. sets the separator - between array and object items to ``","`` and between object keys - and values to ``":"``. Without this flag, the corresponding - separators are ``", "`` and ``": "`` for more readable output. - -``JSON_ENSURE_ASCII`` - If this flag is used, the output is guaranteed to consist only of - ASCII characters. This is achieved by escaping all Unicode - characters outside the ASCII range. - -``JSON_SORT_KEYS`` - If this flag is used, all the objects in output are sorted by key. - This is useful e.g. if two JSON texts are diffed or visually - compared. - -``JSON_PRESERVE_ORDER`` - **Deprecated since version 2.8:** Order of object keys - is always preserved. - - Prior to version 2.8: If this flag is used, object keys in the - output are sorted into the same order in which they were first - inserted to the object. For example, decoding a JSON text and then - encoding with this flag preserves the order of object keys. 
- -``JSON_ENCODE_ANY`` - Specifying this flag makes it possible to encode any JSON value on - its own. Without it, only objects and arrays can be passed as the - *json* value to the encoding functions. - - **Note:** Encoding any value may be useful in some scenarios, but - it's generally discouraged as it violates strict compatibility with - :rfc:`4627`. If you use this flag, don't expect interoperability - with other JSON systems. - - .. versionadded:: 2.1 - -``JSON_ESCAPE_SLASH`` - Escape the ``/`` characters in strings with ``\/``. - - .. versionadded:: 2.4 - -``JSON_REAL_PRECISION(n)`` - Output all real numbers with at most *n* digits of precision. The - valid range for *n* is between 0 and 31 (inclusive), and other - values result in an undefined behavior. - - By default, the precision is 17, to correctly and losslessly encode - all IEEE 754 double precision floating point numbers. - - .. versionadded:: 2.7 - -``JSON_EMBED`` - If this flag is used, the opening and closing characters of the top-level - array ('[', ']') or object ('{', '}') are omitted during encoding. This - flag is useful when concatenating multiple arrays or objects into a stream. - - .. versionadded:: 2.10 - -These functions output UTF-8: - -.. function:: char *json_dumps(const json_t *json, size_t flags) - - Returns the JSON representation of *json* as a string, or *NULL* on - error. *flags* is described above. The return value must be freed - by the caller using :func:`free()`. Note that if you have called - :func:`json_set_alloc_funcs()` to override :func:`free()`, you should - call your custom free function instead to free the return value. - -.. function:: size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags) - - Writes the JSON representation of *json* to the *buffer* of - *size* bytes. Returns the number of bytes that would be written - or 0 on error. *flags* is described above. *buffer* is not - null-terminated. 
- - This function never writes more than *size* bytes. If the return - value is greater than *size*, the contents of the *buffer* are - undefined. This behavior enables you to specify a NULL *buffer* - to determine the length of the encoding. For example:: - - size_t size = json_dumpb(json, NULL, 0, 0); - if (size == 0) - return -1; - - char *buf = alloca(size); - - size = json_dumpb(json, buf, size, 0); - - .. versionadded:: 2.10 - -.. function:: int json_dumpf(const json_t *json, FILE *output, size_t flags) - - Write the JSON representation of *json* to the stream *output*. - *flags* is described above. Returns 0 on success and -1 on error. - If an error occurs, something may have already been written to - *output*. In this case, the output is undefined and most likely not - valid JSON. - -.. function:: int json_dumpfd(const json_t *json, int output, size_t flags) - - Write the JSON representation of *json* to the stream *output*. - *flags* is described above. Returns 0 on success and -1 on error. - If an error occurs, something may have already been written to - *output*. In this case, the output is undefined and most likely not - valid JSON. - - It is important to note that this function can only succeed on stream - file descriptors (such as SOCK_STREAM). Using this function on a - non-stream file descriptor will result in undefined behavior. For - non-stream file descriptors, see instead :func:`json_dumpb()`. - - This function requires POSIX and fails on all non-POSIX systems. - - .. versionadded:: 2.10 - -.. function:: int json_dump_file(const json_t *json, const char *path, size_t flags) - - Write the JSON representation of *json* to the file *path*. If - *path* already exists, it is overwritten. *flags* is described - above. Returns 0 on success and -1 on error. - -.. 
type:: json_dump_callback_t - - A typedef for a function that's called by - :func:`json_dump_callback()`:: - - typedef int (*json_dump_callback_t)(const char *buffer, size_t size, void *data); - - *buffer* points to a buffer containing a chunk of output, *size* is - the length of the buffer, and *data* is the corresponding - :func:`json_dump_callback()` argument passed through. - - *buffer* is guaranteed to be a valid UTF-8 string (i.e. multi-byte - code unit sequences are preserved). *buffer* never contains - embedded null bytes. - - On error, the function should return -1 to stop the encoding - process. On success, it should return 0. - - .. versionadded:: 2.2 - -.. function:: int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, size_t flags) - - Call *callback* repeatedly, passing a chunk of the JSON - representation of *json* each time. *flags* is described above. - Returns 0 on success and -1 on error. - - .. versionadded:: 2.2 - - -.. _apiref-decoding: - -Decoding -======== - -This section describes the functions that can be used to decode JSON -text to the Jansson representation of JSON data. The JSON -specification requires that a JSON text is either a serialized array -or object, and this requirement is also enforced with the following -functions. In other words, the top level value in the JSON text being -decoded must be either array or object. To decode any JSON value, use -the ``JSON_DECODE_ANY`` flag (see below). - -See :ref:`rfc-conformance` for a discussion on Jansson's conformance -to the JSON specification. It explains many design decisions that -affect especially the behavior of the decoder. - -Each function takes a *flags* parameter that can be used to control -the behavior of the decoder. Its default value is 0. The following -macros can be ORed together to obtain *flags*. - -``JSON_REJECT_DUPLICATES`` - Issue a decoding error if any JSON object in the input text - contains duplicate keys. 
Without this flag, the value of the last - occurrence of each key ends up in the result. Key equivalence is - checked byte-by-byte, without special Unicode comparison - algorithms. - - .. versionadded:: 2.1 - -``JSON_DECODE_ANY`` - By default, the decoder expects an array or object as the input. - With this flag enabled, the decoder accepts any valid JSON value. - - **Note:** Decoding any value may be useful in some scenarios, but - it's generally discouraged as it violates strict compatibility with - :rfc:`4627`. If you use this flag, don't expect interoperability - with other JSON systems. - - .. versionadded:: 2.3 - -``JSON_DISABLE_EOF_CHECK`` - By default, the decoder expects that its whole input constitutes a - valid JSON text, and issues an error if there's extra data after - the otherwise valid JSON input. With this flag enabled, the decoder - stops after decoding a valid JSON array or object, and thus allows - extra data after the JSON text. - - Normally, reading will stop when the last ``]`` or ``}`` in the - JSON input is encountered. If both ``JSON_DISABLE_EOF_CHECK`` and - ``JSON_DECODE_ANY`` flags are used, the decoder may read one extra - UTF-8 code unit (up to 4 bytes of input). For example, decoding - ``4true`` correctly decodes the integer 4, but also reads the - ``t``. For this reason, if reading multiple consecutive values that - are not arrays or objects, they should be separated by at least one - whitespace character. - - .. versionadded:: 2.1 - -``JSON_DECODE_INT_AS_REAL`` - JSON defines only one number type. Jansson distinguishes between - ints and reals. For more information see :ref:`real-vs-integer`. - With this flag enabled the decoder interprets all numbers as real - values. Integers that do not have an exact double representation - will silently result in a loss of precision. Integers that cause - a double overflow will cause an error. - - .. versionadded:: 2.5 - -``JSON_ALLOW_NUL`` - Allow ``\u0000`` escape inside string values. 
This is a safety
-   measure; if you know your input can contain null bytes, use this
-   flag. If you don't use this flag, you don't have to worry about null
-   bytes inside strings unless you explicitly create them yourself by
-   using e.g. :func:`json_stringn()` or ``s#`` format specifier for
-   :func:`json_pack()`.
-
-   Object keys cannot have embedded null bytes even if this flag is
-   used.
-
-   .. versionadded:: 2.6
-
-Each function also takes an optional :type:`json_error_t` parameter
-that is filled with error information if decoding fails. It's also
-updated on success; the number of bytes of input read is written to
-its ``position`` field. This is especially useful when using
-``JSON_DISABLE_EOF_CHECK`` to read multiple consecutive JSON texts.
-
-.. versionadded:: 2.3
-   Number of bytes of input read is written to the ``position`` field
-   of the :type:`json_error_t` structure.
-
-If no error or position information is needed, you can pass *NULL*.
-
-.. function:: json_t *json_loads(const char *input, size_t flags, json_error_t *error)
-
-   .. refcounting:: new
-
-   Decodes the JSON string *input* and returns the array or object it
-   contains, or *NULL* on error, in which case *error* is filled with
-   information about the error. *flags* is described above.
-
-.. function:: json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error)
-
-   .. refcounting:: new
-
-   Decodes the JSON string *buffer*, whose length is *buflen*, and
-   returns the array or object it contains, or *NULL* on error, in
-   which case *error* is filled with information about the error. This
-   is similar to :func:`json_loads()` except that the string doesn't
-   need to be null-terminated. *flags* is described above.
-
-   .. versionadded:: 2.1
-
-.. function:: json_t *json_loadf(FILE *input, size_t flags, json_error_t *error)
-
-   ..
refcounting:: new - - Decodes the JSON text in stream *input* and returns the array or - object it contains, or *NULL* on error, in which case *error* is - filled with information about the error. *flags* is described - above. - - This function will start reading the input from whatever position - the input file was in, without attempting to seek first. If an error - occurs, the file position will be left indeterminate. On success, - the file position will be at EOF, unless ``JSON_DISABLE_EOF_CHECK`` - flag was used. In this case, the file position will be at the first - character after the last ``]`` or ``}`` in the JSON input. This - allows calling :func:`json_loadf()` on the same ``FILE`` object - multiple times, if the input consists of consecutive JSON texts, - possibly separated by whitespace. - -.. function:: json_t *json_loadfd(int input, size_t flags, json_error_t *error) - - .. refcounting:: new - - Decodes the JSON text in stream *input* and returns the array or - object it contains, or *NULL* on error, in which case *error* is - filled with information about the error. *flags* is described - above. - - This function will start reading the input from whatever position - the input file descriptor was in, without attempting to seek first. - If an error occurs, the file position will be left indeterminate. - On success, the file position will be at EOF, unless - ``JSON_DISABLE_EOF_CHECK`` flag was used. In this case, the file - descriptor's position will be at the first character after the last - ``]`` or ``}`` in the JSON input. This allows calling - :func:`json_loadfd()` on the same file descriptor multiple times, - if the input consists of consecutive JSON texts, possibly separated - by whitespace. - - It is important to note that this function can only succeed on stream - file descriptors (such as SOCK_STREAM). Using this function on a - non-stream file descriptor will result in undefined behavior. 
For - non-stream file descriptors, see instead :func:`json_loadb()`. In - addition, please note that this function cannot be used on non-blocking - file descriptors (such as a non-blocking socket). Using this function - on non-blocking file descriptors has a high risk of data loss because - it does not support resuming. - - This function requires POSIX and fails on all non-POSIX systems. - - .. versionadded:: 2.10 - -.. function:: json_t *json_load_file(const char *path, size_t flags, json_error_t *error) - - .. refcounting:: new - - Decodes the JSON text in file *path* and returns the array or - object it contains, or *NULL* on error, in which case *error* is - filled with information about the error. *flags* is described - above. - -.. type:: json_load_callback_t - - A typedef for a function that's called by - :func:`json_load_callback()` to read a chunk of input data:: - - typedef size_t (*json_load_callback_t)(void *buffer, size_t buflen, void *data); - - *buffer* points to a buffer of *buflen* bytes, and *data* is the - corresponding :func:`json_load_callback()` argument passed through. - - On success, the function should write at most *buflen* bytes to - *buffer*, and return the number of bytes written; a returned value - of 0 indicates that no data was produced and that the end of file - has been reached. On error, the function should return - ``(size_t)-1`` to abort the decoding process. - - In UTF-8, some code points are encoded as multi-byte sequences. The - callback function doesn't need to worry about this, as Jansson - handles it at a higher level. For example, you can safely read a - fixed number of bytes from a network connection without having to - care about code unit sequences broken apart by the chunk - boundaries. - - .. versionadded:: 2.4 - -.. function:: json_t *json_load_callback(json_load_callback_t callback, void *data, size_t flags, json_error_t *error) - - .. 
refcounting:: new - - Decodes the JSON text produced by repeated calls to *callback*, and - returns the array or object it contains, or *NULL* on error, in - which case *error* is filled with information about the error. - *data* is passed through to *callback* on each call. *flags* is - described above. - - .. versionadded:: 2.4 - - -.. _apiref-pack: - -Building Values -=============== - -This section describes functions that help to create, or *pack*, -complex JSON values, especially nested objects and arrays. Value -building is based on a *format string* that is used to tell the -functions about the expected arguments. - -For example, the format string ``"i"`` specifies a single integer -value, while the format string ``"[ssb]"`` or the equivalent ``"[s, s, -b]"`` specifies an array value with two strings and a boolean as its -items:: - - /* Create the JSON integer 42 */ - json_pack("i", 42); - - /* Create the JSON array ["foo", "bar", true] */ - json_pack("[ssb]", "foo", "bar", 1); - -Here's the full list of format specifiers. The type in parentheses -denotes the resulting JSON type, and the type in brackets (if any) -denotes the C type that is expected as the corresponding argument or -arguments. - -``s`` (string) [const char \*] - Convert a null terminated UTF-8 string to a JSON string. - -``s?`` (string) [const char \*] - Like ``s``, but if the argument is *NULL*, output a JSON null - value. - - .. versionadded:: 2.8 - -``s*`` (string) [const char \*] - Like ``s``, but if the argument is *NULL*, do not output any value. - This format can only be used inside an object or an array. If used - inside an object, the corresponding key is additionally suppressed - when the value is omitted. See below for an example. - - .. versionadded:: 2.11 - -``s#`` (string) [const char \*, int] - Convert a UTF-8 buffer of a given length to a JSON string. - - .. 
versionadded:: 2.5 - -``s%`` (string) [const char \*, size_t] - Like ``s#`` but the length argument is of type :type:`size_t`. - - .. versionadded:: 2.6 - -``+`` [const char \*] - Like ``s``, but concatenate to the previous string. Only valid - after ``s``, ``s#``, ``+`` or ``+#``. - - .. versionadded:: 2.5 - -``+#`` [const char \*, int] - Like ``s#``, but concatenate to the previous string. Only valid - after ``s``, ``s#``, ``+`` or ``+#``. - - .. versionadded:: 2.5 - -``+%`` (string) [const char \*, size_t] - Like ``+#`` but the length argument is of type :type:`size_t`. - - .. versionadded:: 2.6 - -``n`` (null) - Output a JSON null value. No argument is consumed. - -``b`` (boolean) [int] - Convert a C ``int`` to JSON boolean value. Zero is converted - to ``false`` and non-zero to ``true``. - -``i`` (integer) [int] - Convert a C ``int`` to JSON integer. - -``I`` (integer) [json_int_t] - Convert a C :type:`json_int_t` to JSON integer. - -``f`` (real) [double] - Convert a C ``double`` to JSON real. - -``o`` (any value) [json_t \*] - Output any given JSON value as-is. If the value is added to an - array or object, the reference to the value passed to ``o`` is - stolen by the container. - -``O`` (any value) [json_t \*] - Like ``o``, but the argument's reference count is incremented. - This is useful if you pack into an array or object and want to - keep the reference for the JSON value consumed by ``O`` to - yourself. - -``o?``, ``O?`` (any value) [json_t \*] - Like ``o`` and ``O``, respectively, but if the argument is - *NULL*, output a JSON null value. - - .. versionadded:: 2.8 - -``o*``, ``O*`` (any value) [json_t \*] - Like ``o`` and ``O``, respectively, but if the argument is - *NULL*, do not output any value. This format can only be used - inside an object or an array. If used inside an object, the - corresponding key is additionally suppressed. See below for an - example. - - .. 
versionadded:: 2.11 - -``[fmt]`` (array) - Build an array with contents from the inner format string. ``fmt`` - may contain objects and arrays, i.e. recursive value building is - supported. - -``{fmt}`` (object) - Build an object with contents from the inner format string - ``fmt``. The first, third, etc. format specifier represent a key, - and must be a string (see ``s``, ``s#``, ``+`` and ``+#`` above), - as object keys are always strings. The second, fourth, etc. format - specifier represent a value. Any value may be an object or array, - i.e. recursive value building is supported. - -Whitespace, ``:`` and ``,`` are ignored. - -.. function:: json_t *json_pack(const char *fmt, ...) - - .. refcounting:: new - - Build a new JSON value according to the format string *fmt*. For - each format specifier (except for ``{}[]n``), one or more arguments - are consumed and used to build the corresponding value. Returns - *NULL* on error. - -.. function:: json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) - json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) - - .. refcounting:: new - - Like :func:`json_pack()`, but an in the case of an error, an error - message is written to *error*, if it's not *NULL*. The *flags* - parameter is currently unused and should be set to 0. - - As only the errors in format string (and out-of-memory errors) can - be caught by the packer, these two functions are most likely only - useful for debugging format strings. 
-
-More examples::
-
-   /* Build an empty JSON object */
-   json_pack("{}");
-
-   /* Build the JSON object {"foo": 42, "bar": 7} */
-   json_pack("{sisi}", "foo", 42, "bar", 7);
-
-   /* Like above, ':', ',' and whitespace are ignored */
-   json_pack("{s:i, s:i}", "foo", 42, "bar", 7);
-
-   /* Build the JSON array [[1, 2], {"cool": true}] */
-   json_pack("[[i,i],{s:b}]", 1, 2, "cool", 1);
-
-   /* Build a string from a non-null terminated buffer */
-   char buffer[4] = {'t', 'e', 's', 't'};
-   json_pack("s#", buffer, 4);
-
-   /* Concatenate strings together to build the JSON string "foobarbaz" */
-   json_pack("s++", "foo", "bar", "baz");
-
-   /* Create an empty object or array when optional members are missing */
-   json_pack("{s:s*,s:o*,s:O*}", "foo", NULL, "bar", NULL, "baz", NULL);
-   json_pack("[s*,o*,O*]", NULL, NULL, NULL);
-
-
-.. _apiref-unpack:
-
-Parsing and Validating Values
-=============================
-
-This section describes functions that help to validate complex values
-and extract, or *unpack*, data from them. Like :ref:`building values
-<apiref-pack>`, this is also based on format strings.
-
-While a JSON value is unpacked, the type specified in the format
-string is checked to match that of the JSON value. This is the
-validation part of the process. In addition to this, the unpacking
-functions can also check that all items of arrays and objects are
-unpacked. This check can be enabled with the format specifier ``!`` or by
-using the flag ``JSON_STRICT``. See below for details.
-
-Here's the full list of format specifiers. The type in parentheses
-denotes the JSON type, and the type in brackets (if any) denotes the C
-type whose address should be passed.
-
-``s`` (string) [const char \*]
-   Convert a JSON string to a pointer to a null terminated UTF-8
-   string. The resulting string is extracted by using
-   :func:`json_string_value()` internally, so it exists as long as
-   there are still references to the corresponding JSON string.
- -``s%`` (string) [const char \*, size_t \*] - Convert a JSON string to a pointer to a null terminated UTF-8 - string and its length. - - .. versionadded:: 2.6 - -``n`` (null) - Expect a JSON null value. Nothing is extracted. - -``b`` (boolean) [int] - Convert a JSON boolean value to a C ``int``, so that ``true`` - is converted to 1 and ``false`` to 0. - -``i`` (integer) [int] - Convert a JSON integer to C ``int``. - -``I`` (integer) [json_int_t] - Convert a JSON integer to C :type:`json_int_t`. - -``f`` (real) [double] - Convert a JSON real to C ``double``. - -``F`` (integer or real) [double] - Convert a JSON number (integer or real) to C ``double``. - -``o`` (any value) [json_t \*] - Store a JSON value with no conversion to a :type:`json_t` pointer. - -``O`` (any value) [json_t \*] - Like ``o``, but the JSON value's reference count is incremented. - Storage pointers should be initialized NULL before using unpack. - The caller is responsible for releasing all references incremented - by unpack, even when an error occurs. - -``[fmt]`` (array) - Convert each item in the JSON array according to the inner format - string. ``fmt`` may contain objects and arrays, i.e. recursive - value extraction is supported. - -``{fmt}`` (object) - Convert each item in the JSON object according to the inner format - string ``fmt``. The first, third, etc. format specifier represent - a key, and must be ``s``. The corresponding argument to unpack - functions is read as the object key. The second, fourth, etc. - format specifier represent a value and is written to the address - given as the corresponding argument. **Note** that every other - argument is read from and every other is written to. - - ``fmt`` may contain objects and arrays as values, i.e. recursive - value extraction is supported. - - .. versionadded:: 2.3 - Any ``s`` representing a key may be suffixed with a ``?`` to - make the key optional. If the key is not found, nothing is - extracted. See below for an example. 
- -``!`` - This special format specifier is used to enable the check that - all object and array items are accessed, on a per-value basis. It - must appear inside an array or object as the last format specifier - before the closing bracket or brace. To enable the check globally, - use the ``JSON_STRICT`` unpacking flag. - -``*`` - This special format specifier is the opposite of ``!``. If the - ``JSON_STRICT`` flag is used, ``*`` can be used to disable the - strict check on a per-value basis. It must appear inside an array - or object as the last format specifier before the closing bracket - or brace. - -Whitespace, ``:`` and ``,`` are ignored. - -.. function:: int json_unpack(json_t *root, const char *fmt, ...) - - Validate and unpack the JSON value *root* according to the format - string *fmt*. Returns 0 on success and -1 on failure. - -.. function:: int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...) - int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, va_list ap) - - Validate and unpack the JSON value *root* according to the format - string *fmt*. If an error occurs and *error* is not *NULL*, write - error information to *error*. *flags* can be used to control the - behaviour of the unpacker, see below for the flags. Returns 0 on - success and -1 on failure. - -.. note:: - - The first argument of all unpack functions is ``json_t *root`` - instead of ``const json_t *root``, because the use of ``O`` format - specifier causes the reference count of ``root``, or some value - reachable from ``root``, to be increased. Furthermore, the ``o`` - format specifier may be used to extract a value as-is, which allows - modifying the structure or contents of a value reachable from - ``root``. - - If the ``O`` and ``o`` format specifiers are not used, it's - perfectly safe to cast a ``const json_t *`` variable to plain - ``json_t *`` when used with these functions. 
-
-The following unpacking flags are available:
-
-``JSON_STRICT``
-   Enable the extra validation step checking that all object and
-   array items are unpacked. This is equivalent to appending the
-   format specifier ``!`` to the end of every array and object in the
-   format string.
-
-``JSON_VALIDATE_ONLY``
-   Don't extract any data, just validate the JSON value against the
-   given format string. Note that object keys must still be specified
-   after the format string.
-
-Examples::
-
-   /* root is the JSON integer 42 */
-   int myint;
-   json_unpack(root, "i", &myint);
-   assert(myint == 42);
-
-   /* root is the JSON object {"foo": "bar", "quux": true} */
-   const char *str;
-   int boolean;
-   json_unpack(root, "{s:s, s:b}", "foo", &str, "quux", &boolean);
-   assert(strcmp(str, "bar") == 0 && boolean == 1);
-
-   /* root is the JSON array [[1, 2], {"baz": null}] */
-   json_error_t error;
-   json_unpack_ex(root, &error, JSON_VALIDATE_ONLY, "[[i,i], {s:n}]", "baz");
-   /* returns 0 for validation success, nothing is extracted */
-
-   /* root is the JSON array [1, 2, 3, 4, 5] */
-   int myint1, myint2;
-   json_unpack(root, "[ii!]", &myint1, &myint2);
-   /* returns -1 for failed validation */
-
-   /* root is an empty JSON object */
-   int myint1 = 0, myint2 = 0, myint3 = 0;
-   json_unpack(root, "{s?i, s?[ii]}",
-               "foo", &myint1,
-               "bar", &myint2, &myint3);
-   /* myint1, myint2 and myint3 are not touched as "foo" and "bar" don't exist */
-
-
-Equality
-========
-
-Testing for equality of two JSON values cannot, in general, be
-achieved using the ``==`` operator. Equality in the terms of the
-``==`` operator states that the two :type:`json_t` pointers point to
-exactly the same JSON value. However, two JSON values can be equal not
-only if they are exactly the same value, but also if they have equal
-"contents":
-
-* Two integer or real values are equal if their contained numeric
-  values are equal. An integer value is never equal to a real value,
-  though.
- -* Two strings are equal if their contained UTF-8 strings are equal, - byte by byte. Unicode comparison algorithms are not implemented. - -* Two arrays are equal if they have the same number of elements and - each element in the first array is equal to the corresponding - element in the second array. - -* Two objects are equal if they have exactly the same keys and the - value for each key in the first object is equal to the value of the - corresponding key in the second object. - -* Two true, false or null values have no "contents", so they are equal - if their types are equal. (Because these values are singletons, - their equality can actually be tested with ``==``.) - -.. function:: int json_equal(json_t *value1, json_t *value2) - - Returns 1 if *value1* and *value2* are equal, as defined above. - Returns 0 if they are unequal or one or both of the pointers are - *NULL*. - - -Copying -======= - -Because of reference counting, passing JSON values around doesn't -require copying them. But sometimes a fresh copy of a JSON value is -needed. For example, if you need to modify an array, but still want to -use the original afterwards, you should take a copy of it first. - -Jansson supports two kinds of copying: shallow and deep. There is a -difference between these methods only for arrays and objects. Shallow -copying only copies the first level value (array or object) and uses -the same child values in the copied value. Deep copying makes a fresh -copy of the child values, too. Moreover, all the child values are deep -copied in a recursive fashion. - -Copying objects preserves the insertion order of keys. - -.. function:: json_t *json_copy(json_t *value) - - .. refcounting:: new - - Returns a shallow copy of *value*, or *NULL* on error. - -.. function:: json_t *json_deep_copy(const json_t *value) - - .. refcounting:: new - - Returns a deep copy of *value*, or *NULL* on error. - - -.. 
_apiref-custom-memory-allocation: - -Custom Memory Allocation -======================== - -By default, Jansson uses :func:`malloc()` and :func:`free()` for -memory allocation. These functions can be overridden if custom -behavior is needed. - -.. type:: json_malloc_t - - A typedef for a function pointer with :func:`malloc()`'s - signature:: - - typedef void *(*json_malloc_t)(size_t); - -.. type:: json_free_t - - A typedef for a function pointer with :func:`free()`'s - signature:: - - typedef void (*json_free_t)(void *); - -.. function:: void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn) - - Use *malloc_fn* instead of :func:`malloc()` and *free_fn* instead - of :func:`free()`. This function has to be called before any other - Jansson's API functions to ensure that all memory operations use - the same functions. - -.. function:: void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn) - - Fetch the current malloc_fn and free_fn used. Either parameter - may be NULL. - - .. versionadded:: 2.8 - -**Examples:** - -Circumvent problems with different CRT heaps on Windows by using -application's :func:`malloc()` and :func:`free()`:: - - json_set_alloc_funcs(malloc, free); - -Use the `Boehm's conservative garbage collector`_ for memory -operations:: - - json_set_alloc_funcs(GC_malloc, GC_free); - -.. _Boehm's conservative garbage collector: http://www.hboehm.info/gc/ - -Allow storing sensitive data (e.g. passwords or encryption keys) in -JSON structures by zeroing all memory when freed:: - - static void *secure_malloc(size_t size) - { - /* Store the memory area size in the beginning of the block */ - void *ptr = malloc(size + 8); - *((size_t *)ptr) = size; - return ptr + 8; - } - - static void secure_free(void *ptr) - { - size_t size; - - ptr -= 8; - size = *((size_t *)ptr); - - guaranteed_memset(ptr, 0, size + 8); - free(ptr); - } - - int main() - { - json_set_alloc_funcs(secure_malloc, secure_free); - /* ... 
*/ - } - -For more information about the issues of storing sensitive data in -memory, see -http://www.dwheeler.com/secure-programs/Secure-Programs-HOWTO/protect-secrets.html. -The page also explains the :func:`guaranteed_memset()` function used -in the example and gives a sample implementation for it. - -.. _fixed_length_keys: - -Fixed-Length keys -================= - -The Jansson API allows work with fixed-length keys. This can be useful in the following cases: - -* The key is contained inside a buffer and is not null-terminated. In this case creating a new temporary buffer is not needed. -* The key contains U+0000 inside it. - -List of API for fixed-length keys: - -* :c:func:`json_object_getn` -* :c:func:`json_object_setn` -* :c:func:`json_object_setn_nocheck` -* :c:func:`json_object_setn_new` -* :c:func:`json_object_setn_new_nocheck` -* :c:func:`json_object_deln` -* :c:func:`json_object_iter_key_len` -* :c:func:`json_object_keylen_foreach` -* :c:func:`json_object_keylen_foreach_safe` - -**Examples:** - -Try to write a new function to get :c:struct:`json_t` by path separated by ``.`` - -This requires: - -* string iterator (no need to modify the input for better performance) -* API for working with fixed-size keys - -The iterator:: - - struct string { - const char *string; - size_t length; - }; - - size_t string_try_next(struct string *str, const char *delimiter) { - str->string += strspn(str->string, delimiter); - str->length = strcspn(str->string, delimiter); - return str->length; - } - - #define string_foreach(_string, _delimiter) \ - for (; string_try_next(&(_string), _delimiter); (_string).string += (_string).length) - - -The function:: - - json_t *json_object_get_by_path(json_t *object, const char *path) { - struct string str; - json_t *out = object; - - str.string = path; - - string_foreach(str, ".") { - out = json_object_getn(out, str.string, str.length); - if (out == NULL) - return NULL; - } - - return out; - } - -And usage:: - - int main(void) { - json_t 
*obj = json_pack("{s:{s:{s:b}}}", "a", "b", "c", 1); - - json_t *c = json_object_get_by_path(obj, "a.b.c"); - assert(json_is_true(c)); - - json_decref(obj); - } diff --git a/solo-ckpool-source/src/jansson-2.14/doc/changes.rst b/solo-ckpool-source/src/jansson-2.14/doc/changes.rst deleted file mode 100644 index ea56843..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/changes.rst +++ /dev/null @@ -1,5 +0,0 @@ -****************** -Changes in Jansson -****************** - -.. include:: ../CHANGES diff --git a/solo-ckpool-source/src/jansson-2.14/doc/conf.py b/solo-ckpool-source/src/jansson-2.14/doc/conf.py deleted file mode 100644 index 2426171..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/conf.py +++ /dev/null @@ -1,217 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Jansson documentation build configuration file, created by -# sphinx-quickstart on Sun Sep 5 21:47:20 2010. -# -# This file is execfile()d with the current directory set to its containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('ext')) - -# -- General configuration ----------------------------------------------------- - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be extensions -# coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ['refcounting'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. 
-source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Jansson' -copyright = u'2009-2020, Petri Lehtinen' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '2.14' -# The full version, including alpha/beta/rc tags. -release = version - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ['_build'] - -# The reST default role (used for this markup: `text`) to use for all documents. -default_role = 'c:func' -primary_domain = 'c' - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - - -# -- Options for HTML output --------------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. 
-#html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -#html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -#html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Janssondoc' - - -# -- Options for LaTeX output -------------------------------------------------- - -# The paper size ('letter' or 'a4'). -#latex_paper_size = 'letter' - -# The font size ('10pt', '11pt' or '12pt'). -#latex_font_size = '10pt' - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, author, documentclass [howto/manual]). -latex_documents = [ - ('index', 'Jansson.tex', u'Jansson Documentation', - u'Petri Lehtinen', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Additional stuff for the LaTeX preamble. -#latex_preamble = '' - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output -------------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). 
-man_pages = [ - ('index', 'jansson', u'Jansson Documentation', - [u'Petri Lehtinen'], 1) -] diff --git a/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst b/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst deleted file mode 100644 index 5556a6b..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/conformance.rst +++ /dev/null @@ -1,119 +0,0 @@ -.. _rfc-conformance: - -*************** -RFC Conformance -*************** - -JSON is specified in :rfc:`4627`, *"The application/json Media Type -for JavaScript Object Notation (JSON)"*. - -Character Encoding -================== - -Jansson only supports UTF-8 encoded JSON texts. It does not support or -auto-detect any of the other encodings mentioned in the RFC, namely -UTF-16LE, UTF-16BE, UTF-32LE or UTF-32BE. Pure ASCII is supported, as -it's a subset of UTF-8. - -Strings -======= - -JSON strings are mapped to C-style null-terminated character arrays, -and UTF-8 encoding is used internally. - -All Unicode codepoints U+0000 through U+10FFFF are allowed in string -values. However, U+0000 is allowed in object keys only for length-aware functions. - -Unicode normalization or any other transformation is never performed -on any strings (string values or object keys). When checking for -equivalence of strings or object keys, the comparison is performed -byte by byte between the original UTF-8 representations of the -strings. - -Numbers -======= - -.. _real-vs-integer: - -Real vs. Integer ----------------- - -JSON makes no distinction between real and integer numbers; Jansson -does. Real numbers are mapped to the ``double`` type and integers to -the ``json_int_t`` type, which is a typedef of ``long long`` or -``long``, depending on whether ``long long`` is supported by your -compiler or not. 
- -A JSON number is considered to be a real number if its lexical -representation includes one of ``e``, ``E``, or ``.``; regardless if -its actual numeric value is a true integer (e.g., all of ``1E6``, -``3.0``, ``400E-2``, and ``3.14E3`` are mathematical integers, but -will be treated as real values). With the ``JSON_DECODE_INT_AS_REAL`` -decoder flag set all numbers are interpreted as real. - -All other JSON numbers are considered integers. - -When encoding to JSON, real values are always represented -with a fractional part; e.g., the ``double`` value 3.0 will be -represented in JSON as ``3.0``, not ``3``. - -Overflow, Underflow & Precision -------------------------------- - -Real numbers whose absolute values are too small to be represented in -a C ``double`` will be silently estimated with 0.0. Thus, depending on -platform, JSON numbers very close to zero such as 1E-999 may result in -0.0. - -Real numbers whose absolute values are too large to be represented in -a C ``double`` will result in an overflow error (a JSON decoding -error). Thus, depending on platform, JSON numbers like 1E+999 or --1E+999 may result in a parsing error. - -Likewise, integer numbers whose absolute values are too large to be -represented in the ``json_int_t`` type (see above) will result in an -overflow error (a JSON decoding error). Thus, depending on platform, -JSON numbers like 1000000000000000 may result in parsing error. - -Parsing JSON real numbers may result in a loss of precision. As long -as overflow does not occur (i.e. a total loss of precision), the -rounded approximate value is silently used. Thus the JSON number -1.000000000000000005 may, depending on platform, result in the -``double`` value 1.0. - -Signed zeros ------------- - -JSON makes no statement about what a number means; however Javascript -(ECMAscript) does state that +0.0 and -0.0 must be treated as being -distinct values, i.e. -0.0 |not-equal| 0.0. 
Jansson relies on the -underlying floating point library in the C environment in which it is -compiled. Therefore it is platform-dependent whether 0.0 and -0.0 will -be distinct values. Most platforms that use the IEEE 754 -floating-point standard will support signed zeros. - -Note that this only applies to floating-point; neither JSON, C, or -IEEE support the concept of signed integer zeros. - -.. |not-equal| unicode:: U+2260 - -Types ------ - -No support is provided in Jansson for any C numeric types other than -``json_int_t`` and ``double``. This excludes things such as unsigned -types, ``long double``, etc. Obviously, shorter types like ``short``, -``int``, ``long`` (if ``json_int_t`` is ``long long``) and ``float`` -are implicitly handled via the ordinary C type coercion rules (subject -to overflow semantics). Also, no support or hooks are provided for any -supplemental "bignum" type add-on packages. - -Depth of nested values -====================== - -To avoid stack exhaustion, Jansson currently limits the nesting depth -for arrays and objects to a certain value (default: 2048), defined as -a macro ``JSON_PARSER_MAX_DEPTH`` within ``jansson_config.h``. - -The limit is allowed to be set by the RFC; there is no recommended value -or required minimum depth to be supported. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py b/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py deleted file mode 100644 index e72c481..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/ext/refcounting.py +++ /dev/null @@ -1,69 +0,0 @@ -""" - refcounting - ~~~~~~~~~~~ - - Reference count annotations for C API functions. Has the same - result as the sphinx.ext.refcounting extension but works for all - functions regardless of the signature, and the reference counting - information is written inline with the documentation instead of a - separate file. - - Adds a new directive "refcounting". 
The directive has no content - and one required positional parameter:: "new" or "borrow". - - Example: - - .. cfunction:: json_t *json_object(void) - - .. refcounting:: new - - - - :copyright: Copyright (c) 2009-2016 Petri Lehtinen - :license: MIT, see LICENSE for details. -""" - -from docutils import nodes -from docutils.parsers.rst import Directive - - -def visit(self, node): - self.visit_emphasis(node) - -def depart(self, node): - self.depart_emphasis(node) - -def html_visit(self, node): - self.body.append(self.starttag(node, 'em', '', CLASS='refcount')) - -def html_depart(self, node): - self.body.append('') - - -class refcounting(nodes.emphasis): - pass - -class refcounting_directive(Directive): - has_content = False - required_arguments = 1 - optional_arguments = 0 - final_argument_whitespace = False - - def run(self): - if self.arguments[0] == 'borrow': - text = 'Return value: Borrowed reference.' - elif self.arguments[0] == 'new': - text = 'Return value: New reference.' - else: - raise Error('Valid arguments: new, borrow') - - return [refcounting(text, text)] - - -def setup(app): - app.add_node(refcounting, - html=(html_visit, html_depart), - latex=(visit, depart), - text=(visit, depart), - man=(visit, depart)) - app.add_directive('refcounting', refcounting_directive) diff --git a/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst b/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst deleted file mode 100644 index 4cd1977..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/gettingstarted.rst +++ /dev/null @@ -1,264 +0,0 @@ -*************** -Getting Started -*************** - -.. highlight:: c - -Compiling and Installing Jansson -================================ - -The Jansson source is available at -http://www.digip.org/jansson/releases/. - -Unix-like systems (including MinGW) ------------------------------------ - -Unpack the source tarball and change to the source directory: - -.. 
parsed-literal:: - - bunzip2 -c jansson-|release|.tar.bz2 | tar xf - - cd jansson-|release| - -The source uses GNU Autotools (autoconf_, automake_, libtool_), so -compiling and installing is extremely simple:: - - ./configure - make - make check - make install - -To change the destination directory (``/usr/local`` by default), use -the ``--prefix=DIR`` argument to ``./configure``. See ``./configure ---help`` for the list of all possible configuration options. - -The command ``make check`` runs the test suite distributed with -Jansson. This step is not strictly necessary, but it may find possible -problems that Jansson has on your platform. If any problems are found, -please report them. - -If you obtained the source from a Git repository (or any other source -control system), there's no ``./configure`` script as it's not kept in -version control. To create the script, the build system needs to be -bootstrapped. There are many ways to do this, but the easiest one is -to use ``autoreconf``:: - - autoreconf -fi - -This command creates the ``./configure`` script, which can then be -used as described above. - -.. _autoconf: http://www.gnu.org/software/autoconf/ -.. _automake: http://www.gnu.org/software/automake/ -.. _libtool: http://www.gnu.org/software/libtool/ - - -.. _build-cmake: - -CMake (various platforms, including Windows) --------------------------------------------- - -Jansson can be built using CMake_. Create a build directory for an -out-of-tree build, change to that directory, and run ``cmake`` (or ``ccmake``, -``cmake-gui``, or similar) to configure the project. - -See the examples below for more detailed information. - -.. note:: In the below examples ``..`` is used as an argument for ``cmake``. - This is simply the path to the jansson project root directory. - In the example it is assumed you've created a sub-directory ``build`` - and are using that. You could use any path you want. - -.. 
_build-cmake-unix: - -Unix (Make files) -^^^^^^^^^^^^^^^^^ -Generating make files on unix: - -.. parsed-literal:: - - bunzip2 -c jansson-|release|.tar.bz2 | tar xf - - cd jansson-|release| - - mkdir build - cd build - cmake .. # or ccmake .. for a GUI. - -.. note:: - - If you don't want to build docs or ``Sphinx`` is not installed, you should add ``"-DJANSSON_BUILD_DOCS=OFF"`` in the ``cmake`` command. - - -Then to build:: - - make - make check - make install - -Windows (Visual Studio) -^^^^^^^^^^^^^^^^^^^^^^^ -Creating Visual Studio project files from the command line: - -.. parsed-literal:: - - - cd jansson-|release| - - md build - cd build - cmake -G "Visual Studio 15 2017" .. - -.. note:: - - You should replace the name of the generator (``-G`` flag) matching - the Visual Studio version installed on your system. Currently, the - following versions are supported: - - - ``Visual Studio 9 2008`` - - ``Visual Studio 10 2010`` - - ``Visual Studio 11 2012`` - - ``Visual Studio 12 2013`` - - ``Visual Studio 14 2015`` - - ``Visual Studio 15 2017`` - - ``Visual Studio 16 2019`` - - Any later version should also work. - -You will now have a *Visual Studio Solution* in your build directory. -To run the unit tests build the ``RUN_TESTS`` project. - -If you prefer a GUI the ``cmake`` line in the above example can -be replaced with:: - - cmake-gui .. - -For command line help (including a list of available generators) -for CMake_ simply run:: - - cmake - -To list available CMake_ settings (and what they are currently set to) -for the project, run:: - - cmake -LH .. - -Windows (MinGW) -^^^^^^^^^^^^^^^ -If you prefer using MinGW on Windows, make sure MinGW installed and ``{MinGW}/bin`` has been added to ``PATH``, then do the following commands: - -.. parsed-literal:: - - - cd jansson-|release| - - md build - cd build - cmake -G "MinGW Makefiles" .. - mingw32-make - - -Mac OSX (Xcode) -^^^^^^^^^^^^^^^ -If you prefer using Xcode instead of make files on OSX, -do the following. 
(Use the same steps as -for :ref:`Unix `):: - - ... - cmake -G "Xcode" .. - -Additional CMake settings -^^^^^^^^^^^^^^^^^^^^^^^^^ - -Shared library -"""""""""""""" -By default the CMake_ project will generate build files for building the -static library. To build the shared version use:: - - ... - cmake -DJANSSON_BUILD_SHARED_LIBS=1 .. - -Changing install directory (same as autoconf --prefix) -"""""""""""""""""""""""""""""""""""""""""""""""""""""" -Just as with the autoconf_ project you can change the destination directory -for ``make install``. The equivalent for autoconfs ``./configure --prefix`` -in CMake_ is:: - - ... - cmake -DCMAKE_INSTALL_PREFIX:PATH=/some/other/path .. - make install - -.. _CMake: http://www.cmake.org - - -Android -------- - -Jansson can be built for Android platforms. Android.mk is in the -source root directory. The configuration header file is located in the -``android`` directory in the source distribution. - - -Other Systems -------------- - -On non Unix-like systems, you may be unable to run the ``./configure`` -script. In this case, follow these steps. All the files mentioned can -be found in the ``src/`` directory. - -1. Create ``jansson_config.h`` (which has some platform-specific - parameters that are normally filled in by the ``./configure`` - script). Edit ``jansson_config.h.in``, replacing all ``@variable@`` - placeholders, and rename the file to ``jansson_config.h``. - -2. Make ``jansson.h`` and ``jansson_config.h`` available to the - compiler, so that they can be found when compiling programs that - use Jansson. - -3. Compile all the ``.c`` files (in the ``src/`` directory) into a - library file. Make the library available to the compiler, as in - step 2. - - -Building the Documentation --------------------------- - -(This subsection describes how to build the HTML documentation you are -currently reading, so it can be safely skipped.) - -Documentation is in the ``doc/`` subdirectory. 
It's written in -reStructuredText_ with Sphinx_ annotations. To generate the HTML -documentation, invoke:: - - make html - -and point your browser to ``doc/_build/html/index.html``. Sphinx_ 1.0 -or newer is required to generate the documentation. - -.. _reStructuredText: http://docutils.sourceforge.net/rst.html -.. _Sphinx: http://sphinx.pocoo.org/ - - -Compiling Programs that Use Jansson -=================================== - -Jansson involves one C header file, :file:`jansson.h`, so it's enough -to put the line - -:: - - #include - -in the beginning of every source file that uses Jansson. - -There's also just one library to link with, ``libjansson``. Compile and -link the program as follows:: - - cc -o prog prog.c -ljansson - -Starting from version 1.2, there's also support for pkg-config_: - -.. code-block:: shell - - cc -o prog prog.c `pkg-config --cflags --libs jansson` - -.. _pkg-config: http://pkg-config.freedesktop.org/ diff --git a/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c b/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c deleted file mode 100644 index c020f46..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/github_commits.c +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#include -#include - -#include -#include - -#define BUFFER_SIZE (256 * 1024) /* 256 KB */ - -#define URL_FORMAT "https://api.github.com/repos/%s/%s/commits" -#define URL_SIZE 256 - -/* Return the offset of the first newline in text or the length of - text if there's no newline */ -static int newline_offset(const char *text) { - const char *newline = strchr(text, '\n'); - if (!newline) - return strlen(text); - else - return (int)(newline - text); -} - -struct write_result { - char *data; - int pos; -}; - -static size_t write_response(void *ptr, size_t size, size_t nmemb, void *stream) { - struct write_result *result = (struct write_result *)stream; - - if (result->pos + size * nmemb >= BUFFER_SIZE - 1) { - fprintf(stderr, "error: too small buffer\n"); - return 0; - } - - memcpy(result->data + result->pos, ptr, size * nmemb); - result->pos += size * nmemb; - - return size * nmemb; -} - -static char *request(const char *url) { - CURL *curl = NULL; - CURLcode status; - struct curl_slist *headers = NULL; - char *data = NULL; - long code; - - curl_global_init(CURL_GLOBAL_ALL); - curl = curl_easy_init(); - if (!curl) - goto error; - - data = malloc(BUFFER_SIZE); - if (!data) - goto error; - - struct write_result write_result = {.data = data, .pos = 0}; - - curl_easy_setopt(curl, CURLOPT_URL, url); - - /* GitHub commits API v3 requires a User-Agent header */ - headers = curl_slist_append(headers, "User-Agent: Jansson-Tutorial"); - curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers); - - curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_response); - curl_easy_setopt(curl, CURLOPT_WRITEDATA, &write_result); - - status = curl_easy_perform(curl); - if (status != 0) { - fprintf(stderr, "error: unable to request data from %s:\n", url); - fprintf(stderr, "%s\n", curl_easy_strerror(status)); - goto error; - } - - curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code); - if (code != 200) { - fprintf(stderr, "error: server responded with code %ld\n", code); - goto error; - 
} - - curl_easy_cleanup(curl); - curl_slist_free_all(headers); - curl_global_cleanup(); - - /* zero-terminate the result */ - data[write_result.pos] = '\0'; - - return data; - -error: - if (data) - free(data); - if (curl) - curl_easy_cleanup(curl); - if (headers) - curl_slist_free_all(headers); - curl_global_cleanup(); - return NULL; -} - -int main(int argc, char *argv[]) { - size_t i; - char *text; - char url[URL_SIZE]; - - json_t *root; - json_error_t error; - - if (argc != 3) { - fprintf(stderr, "usage: %s USER REPOSITORY\n\n", argv[0]); - fprintf(stderr, "List commits at USER's REPOSITORY.\n\n"); - return 2; - } - - snprintf(url, URL_SIZE, URL_FORMAT, argv[1], argv[2]); - - text = request(url); - if (!text) - return 1; - - root = json_loads(text, 0, &error); - free(text); - - if (!root) { - fprintf(stderr, "error: on line %d: %s\n", error.line, error.text); - return 1; - } - - if (!json_is_array(root)) { - fprintf(stderr, "error: root is not an array\n"); - json_decref(root); - return 1; - } - - for (i = 0; i < json_array_size(root); i++) { - json_t *data, *sha, *commit, *message; - const char *message_text; - - data = json_array_get(root, i); - if (!json_is_object(data)) { - fprintf(stderr, "error: commit data %d is not an object\n", (int)(i + 1)); - json_decref(root); - return 1; - } - - sha = json_object_get(data, "sha"); - if (!json_is_string(sha)) { - fprintf(stderr, "error: commit %d: sha is not a string\n", (int)(i + 1)); - return 1; - } - - commit = json_object_get(data, "commit"); - if (!json_is_object(commit)) { - fprintf(stderr, "error: commit %d: commit is not an object\n", (int)(i + 1)); - json_decref(root); - return 1; - } - - message = json_object_get(commit, "message"); - if (!json_is_string(message)) { - fprintf(stderr, "error: commit %d: message is not a string\n", (int)(i + 1)); - json_decref(root); - return 1; - } - - message_text = json_string_value(message); - printf("%.8s %.*s\n", json_string_value(sha), newline_offset(message_text), - 
message_text); - } - - json_decref(root); - return 0; -} diff --git a/solo-ckpool-source/src/jansson-2.14/doc/index.rst b/solo-ckpool-source/src/jansson-2.14/doc/index.rst deleted file mode 100644 index c679f40..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/index.rst +++ /dev/null @@ -1,53 +0,0 @@ -Jansson Documentation -===================== - -This is the documentation for Jansson_ |release|, last updated |today|. - -Introduction ------------- - -Jansson_ is a C library for encoding, decoding and manipulating JSON -data. Its main features and design principles are: - -- Simple and intuitive API and data model - -- Comprehensive documentation - -- No dependencies on other libraries - -- Full Unicode support (UTF-8) - -- Extensive test suite - -Jansson is licensed under the `MIT license`_; see LICENSE in the -source distribution for details. - -Jansson is used in production and its API is stable. It works on -numerous platforms, including numerous Unix like systems and Windows. -It's suitable for use on any system, including desktop, server, and -small embedded systems. - - -.. _`MIT license`: http://www.opensource.org/licenses/mit-license.php -.. _Jansson: http://www.digip.org/jansson/ - -Contents --------- - -.. toctree:: - :maxdepth: 2 - - gettingstarted - upgrading - tutorial - conformance - threadsafety - apiref - changes - - -Indices and Tables -================== - -* :ref:`genindex` -* :ref:`search` diff --git a/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst b/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst deleted file mode 100644 index 0eebb29..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/threadsafety.rst +++ /dev/null @@ -1,82 +0,0 @@ -.. _thread-safety: - -************* -Thread safety -************* - -Jansson as a library is thread safe and has no mutable global state. -The only exceptions are the hash function seed and memory allocation -functions, see below. - -There's no locking performed inside Jansson's code. 
**Read-only** -access to JSON values shared by multiple threads is safe, but -**mutating** a JSON value that's shared by multiple threads is not. A -multithreaded program must perform its own locking if JSON values -shared by multiple threads are mutated. - -However, **reference count manipulation** (:func:`json_incref()`, -:func:`json_decref()`) is usually thread-safe, and can be performed on -JSON values that are shared among threads. The thread-safety of -reference counting can be checked with the -``JANSSON_THREAD_SAFE_REFCOUNT`` preprocessor constant. Thread-safe -reference count manipulation is achieved using compiler built-in -atomic functions, which are available in most modern compilers. - -If compiler support is not available (``JANSSON_THREAD_SAFE_REFCOUNT`` -is not defined), it may be very difficult to ensure thread safety of -reference counting. It's possible to have a reference to a value -that's also stored inside an array or object in another thread. -Modifying the container (adding or removing values) may trigger -concurrent access to such values, as containers manage the reference -count of their contained values. - - -Hash function seed -================== - -To prevent an attacker from intentionally causing large JSON objects -with specially crafted keys to perform very slow, the hash function -used by Jansson is randomized using a seed value. The seed is -automatically generated on the first explicit or implicit call to -:func:`json_object()`, if :func:`json_object_seed()` has not been -called beforehand. - -The seed is generated by using operating system's entropy sources if -they are available (``/dev/urandom``, ``CryptGenRandom()``). The -initialization is done in as thread safe manner as possible, by using -architecture specific lockless operations if provided by the platform -or the compiler. 
- -If you're using threads, it's recommended to autoseed the hashtable -explicitly before spawning any threads by calling -``json_object_seed(0)`` , especially if you're unsure whether the -initialization is thread safe on your platform. - - -Memory allocation functions -=========================== - -Memory allocation functions should be set at most once, and only on -program startup. See :ref:`apiref-custom-memory-allocation`. - - -Locale -====== - -Jansson works fine under any locale. - -However, if the host program is multithreaded and uses ``setlocale()`` -to switch the locale in one thread while Jansson is currently encoding -or decoding JSON data in another thread, the result may be wrong or -the program may even crash. - -Jansson uses locale specific functions for certain string conversions -in the encoder and decoder, and then converts the locale specific -values to/from the JSON representation. This fails if the locale -changes between the string conversion and the locale-to-JSON -conversion. This can only happen in multithreaded programs that use -``setlocale()``, because ``setlocale()`` switches the locale for all -running threads, not only the thread that calls ``setlocale()``. - -If your program uses ``setlocale()`` as described above, consider -using the thread-safe ``uselocale()`` instead. diff --git a/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst b/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst deleted file mode 100644 index bb7a6c2..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/tutorial.rst +++ /dev/null @@ -1,288 +0,0 @@ -.. _tutorial: - -******** -Tutorial -******** - -.. highlight:: c - -In this tutorial, we create a program that fetches the latest commits -of a repository in GitHub_ over the web. `GitHub API`_ uses JSON, so -the result can be parsed using Jansson. - -To stick to the scope of this tutorial, we will only cover the -parts of the program related to handling JSON data. 
For the best user -experience, the full source code is available: -:download:`github_commits.c`. To compile it (on Unix-like systems with -gcc), use the following command:: - - gcc -o github_commits github_commits.c -ljansson -lcurl - -libcurl_ is used to communicate over the web, so it is required to -compile the program. - -The command line syntax is:: - - github_commits USER REPOSITORY - -``USER`` is a GitHub user ID and ``REPOSITORY`` is the repository -name. Please note that the GitHub API is rate limited, so if you run -the program too many times within a short period of time, the sever -starts to respond with an error. - -.. _GitHub: https://github.com/ -.. _GitHub API: http://developer.github.com/ -.. _libcurl: http://curl.haxx.se/ - - -.. _tutorial-github-commits-api: - -The GitHub Repo Commits API -=========================== - -The `GitHub Repo Commits API`_ is used by sending HTTP requests to -URLs like ``https://api.github.com/repos/USER/REPOSITORY/commits``, -where ``USER`` and ``REPOSITORY`` are the GitHub user ID and the name -of the repository whose commits are to be listed, respectively. - -GitHub responds with a JSON array of the following form: - -.. code-block:: none - - [ - { - "sha": "", - "commit": { - "message": "", - - }, - - }, - { - "sha": "", - "commit": { - "message": "", - - }, - - }, - - ] - -In our program, the HTTP request is sent using the following -function:: - - static char *request(const char *url); - -It takes the URL as a parameter, performs a HTTP GET request, and -returns a newly allocated string that contains the response body. If -the request fails, an error message is printed to stderr and the -return value is *NULL*. For full details, refer to :download:`the code -`, as the actual implementation is not important -here. - -.. _GitHub Repo Commits API: http://developer.github.com/v3/repos/commits/ - -.. 
_tutorial-the-program: - -The Program -=========== - -First the includes:: - - #include - #include - -Like all the programs using Jansson, we need to include -:file:`jansson.h`. - -The following definitions are used to build the GitHub API request -URL:: - - #define URL_FORMAT "https://api.github.com/repos/%s/%s/commits" - #define URL_SIZE 256 - -The following function is used when formatting the result to find the -first newline in the commit message:: - - /* Return the offset of the first newline in text or the length of - text if there's no newline */ - static int newline_offset(const char *text) - { - const char *newline = strchr(text, '\n'); - if(!newline) - return strlen(text); - else - return (int)(newline - text); - } - -The main function follows. In the beginning, we first declare a bunch -of variables and check the command line parameters:: - - int main(int argc, char *argv[]) - { - size_t i; - char *text; - char url[URL_SIZE]; - - json_t *root; - json_error_t error; - - if(argc != 3) - { - fprintf(stderr, "usage: %s USER REPOSITORY\n\n", argv[0]); - fprintf(stderr, "List commits at USER's REPOSITORY.\n\n"); - return 2; - } - -Then we build the request URL using the user and repository names -given as command line parameters:: - - snprintf(url, URL_SIZE, URL_FORMAT, argv[1], argv[2]); - -This uses the ``URL_SIZE`` and ``URL_FORMAT`` constants defined above. -Now we're ready to actually request the JSON data over the web:: - - text = request(url); - if(!text) - return 1; - -If an error occurs, our function ``request`` prints the error and -returns *NULL*, so it's enough to just return 1 from the main -function. - -Next we'll call :func:`json_loads()` to decode the JSON text we got -as a response:: - - root = json_loads(text, 0, &error); - free(text); - - if(!root) - { - fprintf(stderr, "error: on line %d: %s\n", error.line, error.text); - return 1; - } - -We don't need the JSON text anymore, so we can free the ``text`` -variable right after decoding it. 
If :func:`json_loads()` fails, it -returns *NULL* and sets error information to the :type:`json_error_t` -structure given as the second parameter. In this case, our program -prints the error information out and returns 1 from the main function. - -Now we're ready to extract the data out of the decoded JSON response. -The structure of the response JSON was explained in section -:ref:`tutorial-github-commits-api`. - -We check that the returned value really is an array:: - - if(!json_is_array(root)) - { - fprintf(stderr, "error: root is not an array\n"); - json_decref(root); - return 1; - } - -Then we proceed to loop over all the commits in the array:: - - for(i = 0; i < json_array_size(root); i++) - { - json_t *data, *sha, *commit, *message; - const char *message_text; - - data = json_array_get(root, i); - if(!json_is_object(data)) - { - fprintf(stderr, "error: commit data %d is not an object\n", i + 1); - json_decref(root); - return 1; - } - ... - -The function :func:`json_array_size()` returns the size of a JSON -array. First, we again declare some variables and then extract the -i'th element of the ``root`` array using :func:`json_array_get()`. -We also check that the resulting value is a JSON object. - -Next we'll extract the commit ID (a hexadecimal SHA-1 sum), -intermediate commit info object, and the commit message from that -object. We also do proper type checks:: - - sha = json_object_get(data, "sha"); - if(!json_is_string(sha)) - { - fprintf(stderr, "error: commit %d: sha is not a string\n", i + 1); - json_decref(root); - return 1; - } - - commit = json_object_get(data, "commit"); - if(!json_is_object(commit)) - { - fprintf(stderr, "error: commit %d: commit is not an object\n", i + 1); - json_decref(root); - return 1; - } - - message = json_object_get(commit, "message"); - if(!json_is_string(message)) - { - fprintf(stderr, "error: commit %d: message is not a string\n", i + 1); - json_decref(root); - return 1; - } - ... 
- -And finally, we'll print the first 8 characters of the commit ID and -the first line of the commit message. A C-style string is extracted -from a JSON string using :func:`json_string_value()`:: - - message_text = json_string_value(message); - printf("%.8s %.*s\n", - json_string_value(sha), - newline_offset(message_text), - message_text); - } - -After sending the HTTP request, we decoded the JSON text using -:func:`json_loads()`, remember? It returns a *new reference* to the -JSON value it decodes. When we're finished with the value, we'll need -to decrease the reference count using :func:`json_decref()`. This way -Jansson can release the resources:: - - json_decref(root); - return 0; - -For a detailed explanation of reference counting in Jansson, see -:ref:`apiref-reference-count` in :ref:`apiref`. - -The program's ready, let's test it and view the latest commits in -Jansson's repository: - -.. code-block:: shell - - $ ./github_commits akheron jansson - 1581f26a Merge branch '2.3' - aabfd493 load: Change buffer_pos to be a size_t - bd72efbd load: Avoid unexpected behaviour in macro expansion - e8fd3e30 Document and tweak json_load_callback() - 873eddaf Merge pull request #60 from rogerz/contrib - bd2c0c73 Ignore the binary test_load_callback - 17a51a4b Merge branch '2.3' - 09c39adc Add json_load_callback to the list of exported symbols - cbb80baf Merge pull request #57 from rogerz/contrib - 040bd7b0 Add json_load_callback() - 2637faa4 Make test stripping locale independent - <...> - - -Conclusion -========== - -In this tutorial, we implemented a program that fetches the latest -commits of a GitHub repository using the GitHub Repo Commits API. -Jansson was used to decode the JSON response and to extract the commit -data. - -This tutorial only covered a small part of Jansson. For example, we -did not create or manipulate JSON values at all. Proceed to -:ref:`apiref` to explore all features of Jansson. 
diff --git a/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst b/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst deleted file mode 100644 index 94ff7de..0000000 --- a/solo-ckpool-source/src/jansson-2.14/doc/upgrading.rst +++ /dev/null @@ -1,76 +0,0 @@ -.. highlight:: c - -****************** -Upgrading from 1.x -****************** - -This chapter lists the backwards incompatible changes introduced in -Jansson 2.0, and the steps that are needed for upgrading your code. - -**The incompatibilities are not dramatic.** The biggest change is that -all decoding functions now require and extra parameter. Most programs -can be modified to work with 2.0 by adding a ``0`` as the second -parameter to all calls of :func:`json_loads()`, :func:`json_loadf()` -and :func:`json_load_file()`. - - -Compatibility -============= - -Jansson 2.0 is backwards incompatible with the Jansson 1.x releases. -It is ABI incompatible, i.e. all programs dynamically linking to the -Jansson library need to be recompiled. It's also API incompatible, -i.e. the source code of programs using Jansson 1.x may need -modifications to make them compile against Jansson 2.0. - -All the 2.x releases are guaranteed to be backwards compatible for -both ABI and API, so no recompilation or source changes are needed -when upgrading from 2.x to 2.y. - - -List of Incompatible Changes -============================ - -**Decoding flags** - For future needs, a ``flags`` parameter was added as the second - parameter to all decoding functions, i.e. :func:`json_loads()`, - :func:`json_loadf()` and :func:`json_load_file()`. All calls to - these functions need to be changed by adding a ``0`` as the second - argument. For example:: - - /* old code */ - json_loads(input, &error); - - /* new code */ - json_loads(input, 0, &error); - - -**Underlying type of JSON integers** - The underlying C type of JSON integers has been changed from - ``int`` to the widest available signed integer type, i.e. 
- ``long long`` or ``long``, depending on whether - ``long long`` is supported on your system or not. This makes - the whole 64-bit integer range available on most modern systems. - - ``jansson.h`` has a typedef :type:`json_int_t` to the underlying - integer type. ``int`` should still be used in most cases when - dealing with smallish JSON integers, as the compiler handles - implicit type coercion. Only when the full 64-bit range is needed, - :type:`json_int_t` should be explicitly used. - - -**Maximum encoder indentation depth** - The maximum argument of the ``JSON_INDENT()`` macro has been - changed from 255 to 31, to free up bits from the ``flags`` - parameter of :func:`json_dumps()`, :func:`json_dumpf()` and - :func:`json_dump_file()`. If your code uses a bigger indentation - than 31, it needs to be changed. - - -**Unsigned integers in API functions** - Version 2.0 unifies unsigned integer usage in the API. All uses of - ``unsigned int`` and ``unsigned long`` have been replaced - with ``size_t``. This includes flags, container sizes, etc. - This should not require source code changes, as both - ``unsigned int`` and ``unsigned long`` are usually - compatible with ``size_t``. diff --git a/solo-ckpool-source/src/jansson-2.14/examples/README.rst b/solo-ckpool-source/src/jansson-2.14/examples/README.rst deleted file mode 100644 index a7c5274..0000000 --- a/solo-ckpool-source/src/jansson-2.14/examples/README.rst +++ /dev/null @@ -1,4 +0,0 @@ -Jansson examples -================ - -This directory contains simple example programs that use Jansson. diff --git a/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c b/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c deleted file mode 100644 index a96a0f8..0000000 --- a/solo-ckpool-source/src/jansson-2.14/examples/simple_parse.c +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Simple example of parsing and printing JSON using jansson. 
- * - * SYNOPSIS: - * $ examples/simple_parse - * Type some JSON > [true, false, null, 1, 0.0, -0.0, "", {"name": "barney"}] - * JSON Array of 8 elements: - * JSON True - * JSON False - * JSON Null - * JSON Integer: "1" - * JSON Real: 0.000000 - * JSON Real: -0.000000 - * JSON String: "" - * JSON Object of 1 pair: - * JSON Key: "name" - * JSON String: "barney" - * - * Copyright (c) 2014 Robert Poor - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#include -#include -#include - -/* forward refs */ -void print_json(json_t *root); -void print_json_aux(json_t *element, int indent); -void print_json_indent(int indent); -const char *json_plural(size_t count); -void print_json_object(json_t *element, int indent); -void print_json_array(json_t *element, int indent); -void print_json_string(json_t *element, int indent); -void print_json_integer(json_t *element, int indent); -void print_json_real(json_t *element, int indent); -void print_json_true(json_t *element, int indent); -void print_json_false(json_t *element, int indent); -void print_json_null(json_t *element, int indent); - -void print_json(json_t *root) { print_json_aux(root, 0); } - -void print_json_aux(json_t *element, int indent) { - switch (json_typeof(element)) { - case JSON_OBJECT: - print_json_object(element, indent); - break; - case JSON_ARRAY: - print_json_array(element, indent); - break; - case JSON_STRING: - print_json_string(element, indent); - break; - case JSON_INTEGER: - print_json_integer(element, indent); - break; - case JSON_REAL: - print_json_real(element, indent); - break; - case JSON_TRUE: - print_json_true(element, indent); - break; - case JSON_FALSE: - print_json_false(element, indent); - break; - case JSON_NULL: - print_json_null(element, indent); - break; - default: - fprintf(stderr, "unrecognized JSON type %d\n", json_typeof(element)); - } -} - -void print_json_indent(int indent) { - int i; - 
for (i = 0; i < indent; i++) { - putchar(' '); - } -} - -const char *json_plural(size_t count) { return count == 1 ? "" : "s"; } - -void print_json_object(json_t *element, int indent) { - size_t size; - const char *key; - json_t *value; - - print_json_indent(indent); - size = json_object_size(element); - - printf("JSON Object of %lld pair%s:\n", (long long)size, json_plural(size)); - json_object_foreach(element, key, value) { - print_json_indent(indent + 2); - printf("JSON Key: \"%s\"\n", key); - print_json_aux(value, indent + 2); - } -} - -void print_json_array(json_t *element, int indent) { - size_t i; - size_t size = json_array_size(element); - print_json_indent(indent); - - printf("JSON Array of %lld element%s:\n", (long long)size, json_plural(size)); - for (i = 0; i < size; i++) { - print_json_aux(json_array_get(element, i), indent + 2); - } -} - -void print_json_string(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON String: \"%s\"\n", json_string_value(element)); -} - -void print_json_integer(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON Integer: \"%" JSON_INTEGER_FORMAT "\"\n", json_integer_value(element)); -} - -void print_json_real(json_t *element, int indent) { - print_json_indent(indent); - printf("JSON Real: %f\n", json_real_value(element)); -} - -void print_json_true(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON True\n"); -} - -void print_json_false(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON False\n"); -} - -void print_json_null(json_t *element, int indent) { - (void)element; - print_json_indent(indent); - printf("JSON Null\n"); -} - -/* - * Parse text into a JSON object. If text is valid JSON, returns a - * json_t structure, otherwise prints and error and returns null. 
- */ -json_t *load_json(const char *text) { - json_t *root; - json_error_t error; - - root = json_loads(text, 0, &error); - - if (root) { - return root; - } else { - fprintf(stderr, "json error on line %d: %s\n", error.line, error.text); - return (json_t *)0; - } -} - -/* - * Print a prompt and return (by reference) a null-terminated line of - * text. Returns NULL on eof or some error. - */ -char *read_line(char *line, int max_chars) { - printf("Type some JSON > "); - fflush(stdout); - return fgets(line, max_chars, stdin); -} - -/* ================================================================ - * main - */ - -#define MAX_CHARS 4096 - -int main(int argc, char *argv[]) { - char line[MAX_CHARS]; - - if (argc != 1) { - fprintf(stderr, "Usage: %s\n", argv[0]); - exit(-1); - } - - while (read_line(line, MAX_CHARS) != (char *)NULL) { - - /* parse text into JSON structure */ - json_t *root = load_json(line); - - if (root) { - /* print and release the JSON structure */ - print_json(root); - json_decref(root); - } - } - - return 0; -} diff --git a/solo-ckpool-source/src/jansson-2.14/jansson.pc.in b/solo-ckpool-source/src/jansson-2.14/jansson.pc.in deleted file mode 100644 index 69c9a43..0000000 --- a/solo-ckpool-source/src/jansson-2.14/jansson.pc.in +++ /dev/null @@ -1,10 +0,0 @@ -prefix=@prefix@ -exec_prefix=@exec_prefix@ -libdir=@libdir@ -includedir=@includedir@ - -Name: Jansson -Description: Library for encoding, decoding and manipulating JSON data -Version: @VERSION@ -Libs: -L${libdir} -ljansson -Cflags: -I${includedir} diff --git a/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in b/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in deleted file mode 100644 index b8f5097..0000000 --- a/solo-ckpool-source/src/jansson-2.14/jansson_private_config.h.in +++ /dev/null @@ -1,160 +0,0 @@ -/* jansson_private_config.h.in. Generated from configure.ac by autoheader. 
*/ - -/* Define to 1 if gcc's __atomic builtins are available */ -#undef HAVE_ATOMIC_BUILTINS - -/* Define to 1 if you have the `close' function. */ -#undef HAVE_CLOSE - -/* Define to 1 if you have the header file. */ -#undef HAVE_DLFCN_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_ENDIAN_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_FCNTL_H - -/* Define to 1 if you have the `getpid' function. */ -#undef HAVE_GETPID - -/* Define to 1 if you have the `gettimeofday' function. */ -#undef HAVE_GETTIMEOFDAY - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - -/* Define to 1 if you have the `localeconv' function. */ -#undef HAVE_LOCALECONV - -/* Define to 1 if you have the header file. */ -#undef HAVE_LOCALE_H - -/* Define to 1 if the system has the type `long long int'. */ -#undef HAVE_LONG_LONG_INT - -/* Define to 1 if you have the `open' function. */ -#undef HAVE_OPEN - -/* Define to 1 if you have the `read' function. */ -#undef HAVE_READ - -/* Define to 1 if you have the header file. */ -#undef HAVE_SCHED_H - -/* Define to 1 if you have the `sched_yield' function. */ -#undef HAVE_SCHED_YIELD - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDIO_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the `strtoll' function. */ -#undef HAVE_STRTOLL - -/* Define to 1 if gcc's __sync builtins are available */ -#undef HAVE_SYNC_BUILTINS - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_PARAM_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TIME_H - -/* Define to 1 if you have the header file. 
*/ -#undef HAVE_SYS_TYPES_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to 1 if the system has the type `unsigned long long int'. */ -#undef HAVE_UNSIGNED_LONG_LONG_INT - -/* Number of buckets new object hashtables contain is 2 raised to this power. - E.g. 3 -> 2^3 = 8. */ -#undef INITIAL_HASHTABLE_ORDER - -/* Define to the sub-directory where libtool stores uninstalled libraries. */ -#undef LT_OBJDIR - -/* Name of package */ -#undef PACKAGE - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if all of the C90 standard headers exist (not just the ones - required in a freestanding environment). This macro is provided for - backward compatibility; new code need not use it. */ -#undef STDC_HEADERS - -/* Define to 1 if /dev/urandom should be used for seeding the hash function */ -#undef USE_URANDOM - -/* Define to 1 if CryptGenRandom should be used for seeding the hash function - */ -#undef USE_WINDOWS_CRYPTOAPI - -/* Version number of package */ -#undef VERSION - -/* Define for Solaris 2.5.1 so the uint32_t typedef from , - , or is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -#undef _UINT32_T - -/* Define for Solaris 2.5.1 so the uint8_t typedef from , - , or is not used. If the typedef were allowed, the - #define below would cause a syntax error. */ -#undef _UINT8_T - -/* Define to `__inline__' or `__inline' if that's what the C compiler - calls it, or to nothing if 'inline' is not supported under any name. 
*/ -#ifndef __cplusplus -#undef inline -#endif - -/* Define to the type of a signed integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -#undef int32_t - -/* Define to the type of an unsigned integer type of width exactly 16 bits if - such a type exists and the standard includes do not define it. */ -#undef uint16_t - -/* Define to the type of an unsigned integer type of width exactly 32 bits if - such a type exists and the standard includes do not define it. */ -#undef uint32_t - -/* Define to the type of an unsigned integer type of width exactly 8 bits if - such a type exists and the standard includes do not define it. */ -#undef uint8_t diff --git a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format deleted file mode 100755 index d46056c..0000000 --- a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/bash - -find . -type f -a '(' -name '*.c' -o -name '*.h' ')' | xargs clang-format -i diff --git a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check b/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check deleted file mode 100755 index 983e55d..0000000 --- a/solo-ckpool-source/src/jansson-2.14/scripts/clang-format-check +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -CLANG_FORMAT=${CLANG_FORMAT:-clang-format} -CLANG_FORMAT_VERSION=${CLANG_FORMAT_VERSION:-} - -if ! type $CLANG_FORMAT >/dev/null || \ - ! $CLANG_FORMAT --version | grep -q "version ${CLANG_FORMAT_VERSION}"; then - # If running tests, mark this test as skipped. - exit 77 -fi - -errors=0 -paths=$(git ls-files | grep '\.[ch]$') -for path in $paths; do - in=$(cat $path) - out=$($CLANG_FORMAT $path) - - if [ "$in" != "$out" ]; then - diff -u -L $path -L "$path.formatted" $path - <<<$out - errors=1 - fi -done - -if [ $errors -ne 0 ]; then - echo "Formatting errors detected, run ./scripts/clang-format to fix!" 
- exit 1 -fi diff --git a/solo-ckpool-source/src/jansson-2.14/src/Makefile.am b/solo-ckpool-source/src/jansson-2.14/src/Makefile.am deleted file mode 100644 index 63eda32..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/Makefile.am +++ /dev/null @@ -1,30 +0,0 @@ -EXTRA_DIST = jansson.def - -include_HEADERS = jansson.h -nodist_include_HEADERS = jansson_config.h - -lib_LTLIBRARIES = libjansson.la -libjansson_la_SOURCES = \ - dump.c \ - error.c \ - hashtable.c \ - hashtable.h \ - hashtable_seed.c \ - jansson_private.h \ - load.c \ - lookup3.h \ - memory.c \ - pack_unpack.c \ - strbuffer.c \ - strbuffer.h \ - strconv.c \ - utf.c \ - utf.h \ - value.c \ - version.c -libjansson_la_LDFLAGS = \ - -no-undefined \ - -export-symbols-regex '^json_|^jansson_' \ - -version-info 18:0:14 \ - @JSON_SYMVER_LDFLAGS@ \ - @JSON_BSYMBOLIC_LDFLAGS@ diff --git a/solo-ckpool-source/src/jansson-2.14/src/dump.c b/solo-ckpool-source/src/jansson-2.14/src/dump.c deleted file mode 100644 index a86068b..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/dump.c +++ /dev/null @@ -1,492 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#include "jansson_private.h" - -#include -#include -#include -#include -#ifdef HAVE_UNISTD_H -#include -#endif - -#include "jansson.h" -#include "strbuffer.h" -#include "utf.h" - -#define MAX_INTEGER_STR_LENGTH 100 -#define MAX_REAL_STR_LENGTH 100 - -#define FLAGS_TO_INDENT(f) ((f)&0x1F) -#define FLAGS_TO_PRECISION(f) (((f) >> 11) & 0x1F) - -struct buffer { - const size_t size; - size_t used; - char *data; -}; - -static int dump_to_strbuffer(const char *buffer, size_t size, void *data) { - return strbuffer_append_bytes((strbuffer_t *)data, buffer, size); -} - -static int dump_to_buffer(const char *buffer, size_t size, void *data) { - struct buffer *buf = (struct buffer *)data; - - if (buf->used + size <= buf->size) - memcpy(&buf->data[buf->used], buffer, size); - - buf->used += size; - return 0; -} - -static int dump_to_file(const char *buffer, size_t size, void *data) { - FILE *dest = (FILE *)data; - if (fwrite(buffer, size, 1, dest) != 1) - return -1; - return 0; -} - -static int dump_to_fd(const char *buffer, size_t size, void *data) { -#ifdef HAVE_UNISTD_H - int *dest = (int *)data; - if (write(*dest, buffer, size) == (ssize_t)size) - return 0; -#endif - return -1; -} - -/* 32 spaces (the maximum indentation size) */ -static const char whitespace[] = " "; - -static int dump_indent(size_t flags, int depth, int space, json_dump_callback_t dump, - void *data) { - if (FLAGS_TO_INDENT(flags) > 0) { - unsigned int ws_count = FLAGS_TO_INDENT(flags), n_spaces = depth * ws_count; - - if (dump("\n", 1, data)) - return -1; - - while (n_spaces > 0) { - int cur_n = - n_spaces < sizeof whitespace - 1 ? 
n_spaces : sizeof whitespace - 1; - - if (dump(whitespace, cur_n, data)) - return -1; - - n_spaces -= cur_n; - } - } else if (space && !(flags & JSON_COMPACT)) { - return dump(" ", 1, data); - } - return 0; -} - -static int dump_string(const char *str, size_t len, json_dump_callback_t dump, void *data, - size_t flags) { - const char *pos, *end, *lim; - int32_t codepoint = 0; - - if (dump("\"", 1, data)) - return -1; - - end = pos = str; - lim = str + len; - while (1) { - const char *text; - char seq[13]; - int length; - - while (end < lim) { - end = utf8_iterate(pos, lim - pos, &codepoint, flags & JSON_NO_UTF8); - if (!end) - return -1; - - /* mandatory escape or control char */ - if (codepoint == '\\' || codepoint == '"' || codepoint < 0x20) - break; - - /* slash */ - if ((flags & JSON_ESCAPE_SLASH) && codepoint == '/') - break; - - /* non-ASCII */ - if ((flags & JSON_ENSURE_ASCII) && codepoint > 0x7F) - break; - - pos = end; - } - - if (pos != str) { - if (dump(str, pos - str, data)) - return -1; - } - - if (end == pos) - break; - - /* handle \, /, ", and control codes */ - length = 2; - switch (codepoint) { - case '\\': - text = "\\\\"; - break; - case '\"': - text = "\\\""; - break; - case '\b': - text = "\\b"; - break; - case '\f': - text = "\\f"; - break; - case '\n': - text = "\\n"; - break; - case '\r': - text = "\\r"; - break; - case '\t': - text = "\\t"; - break; - case '/': - text = "\\/"; - break; - default: { - /* codepoint is in BMP */ - if (codepoint < 0x10000) { - snprintf(seq, sizeof(seq), "\\u%04X", (unsigned int)codepoint); - length = 6; - } - - /* not in BMP -> construct a UTF-16 surrogate pair */ - else { - int32_t first, last; - - codepoint -= 0x10000; - first = 0xD800 | ((codepoint & 0xffc00) >> 10); - last = 0xDC00 | (codepoint & 0x003ff); - - snprintf(seq, sizeof(seq), "\\u%04X\\u%04X", (unsigned int)first, - (unsigned int)last); - length = 12; - } - - text = seq; - break; - } - } - - if (dump(text, length, data)) - return -1; - - str = pos 
= end; - } - - return dump("\"", 1, data); -} - -struct key_len { - const char *key; - int len; -}; - -static int compare_keys(const void *key1, const void *key2) { - const struct key_len *k1 = key1; - const struct key_len *k2 = key2; - const size_t min_size = k1->len < k2->len ? k1->len : k2->len; - int res = memcmp(k1->key, k2->key, min_size); - - if (res) - return res; - - return k1->len - k2->len; -} - -static int do_dump(const json_t *json, size_t flags, int depth, hashtable_t *parents, - json_dump_callback_t dump, void *data) { - int embed = flags & JSON_EMBED; - - flags &= ~JSON_EMBED; - - if (!json) - return -1; - - switch (json_typeof(json)) { - case JSON_NULL: - return dump("null", 4, data); - - case JSON_TRUE: - return dump("true", 4, data); - - case JSON_FALSE: - return dump("false", 5, data); - - case JSON_INTEGER: { - char buffer[MAX_INTEGER_STR_LENGTH]; - int size; - - size = snprintf(buffer, MAX_INTEGER_STR_LENGTH, "%" JSON_INTEGER_FORMAT, - json_integer_value(json)); - if (size < 0 || size >= MAX_INTEGER_STR_LENGTH) - return -1; - - return dump(buffer, size, data); - } - - case JSON_REAL: { - char buffer[MAX_REAL_STR_LENGTH]; - int size; - double value = json_real_value(json); - - size = jsonp_dtostr(buffer, MAX_REAL_STR_LENGTH, value, - FLAGS_TO_PRECISION(flags)); - if (size < 0) - return -1; - - return dump(buffer, size, data); - } - - case JSON_STRING: - return dump_string(json_string_value(json), json_string_length(json), dump, - data, flags); - - case JSON_ARRAY: { - size_t n; - size_t i; - /* Space for "0x", double the sizeof a pointer for the hex and a - * terminator. */ - char key[2 + (sizeof(json) * 2) + 1]; - size_t key_len; - - /* detect circular references */ - if (jsonp_loop_check(parents, json, key, sizeof(key), &key_len)) - return -1; - - n = json_array_size(json); - - if (!embed && dump("[", 1, data)) - return -1; - if (n == 0) { - hashtable_del(parents, key, key_len); - return embed ? 
0 : dump("]", 1, data); - } - if (dump_indent(flags, depth + 1, 0, dump, data)) - return -1; - - for (i = 0; i < n; ++i) { - if (do_dump(json_array_get(json, i), flags, depth + 1, parents, dump, - data)) - return -1; - - if (i < n - 1) { - if (dump(",", 1, data) || - dump_indent(flags, depth + 1, 1, dump, data)) - return -1; - } else { - if (dump_indent(flags, depth, 0, dump, data)) - return -1; - } - } - - hashtable_del(parents, key, key_len); - return embed ? 0 : dump("]", 1, data); - } - - case JSON_OBJECT: { - void *iter; - const char *separator; - int separator_length; - char loop_key[LOOP_KEY_LEN]; - size_t loop_key_len; - - if (flags & JSON_COMPACT) { - separator = ":"; - separator_length = 1; - } else { - separator = ": "; - separator_length = 2; - } - - /* detect circular references */ - if (jsonp_loop_check(parents, json, loop_key, sizeof(loop_key), - &loop_key_len)) - return -1; - - iter = json_object_iter((json_t *)json); - - if (!embed && dump("{", 1, data)) - return -1; - if (!iter) { - hashtable_del(parents, loop_key, loop_key_len); - return embed ? 
0 : dump("}", 1, data); - } - if (dump_indent(flags, depth + 1, 0, dump, data)) - return -1; - - if (flags & JSON_SORT_KEYS) { - struct key_len *keys; - size_t size, i; - - size = json_object_size(json); - keys = jsonp_malloc(size * sizeof(struct key_len)); - if (!keys) - return -1; - - i = 0; - while (iter) { - struct key_len *keylen = &keys[i]; - - keylen->key = json_object_iter_key(iter); - keylen->len = json_object_iter_key_len(iter); - - iter = json_object_iter_next((json_t *)json, iter); - i++; - } - assert(i == size); - - qsort(keys, size, sizeof(struct key_len), compare_keys); - - for (i = 0; i < size; i++) { - const struct key_len *key; - json_t *value; - - key = &keys[i]; - value = json_object_getn(json, key->key, key->len); - assert(value); - - dump_string(key->key, key->len, dump, data, flags); - if (dump(separator, separator_length, data) || - do_dump(value, flags, depth + 1, parents, dump, data)) { - jsonp_free(keys); - return -1; - } - - if (i < size - 1) { - if (dump(",", 1, data) || - dump_indent(flags, depth + 1, 1, dump, data)) { - jsonp_free(keys); - return -1; - } - } else { - if (dump_indent(flags, depth, 0, dump, data)) { - jsonp_free(keys); - return -1; - } - } - } - - jsonp_free(keys); - } else { - /* Don't sort keys */ - - while (iter) { - void *next = json_object_iter_next((json_t *)json, iter); - const char *key = json_object_iter_key(iter); - const size_t key_len = json_object_iter_key_len(iter); - - dump_string(key, key_len, dump, data, flags); - if (dump(separator, separator_length, data) || - do_dump(json_object_iter_value(iter), flags, depth + 1, parents, - dump, data)) - return -1; - - if (next) { - if (dump(",", 1, data) || - dump_indent(flags, depth + 1, 1, dump, data)) - return -1; - } else { - if (dump_indent(flags, depth, 0, dump, data)) - return -1; - } - - iter = next; - } - } - - hashtable_del(parents, loop_key, loop_key_len); - return embed ? 
0 : dump("}", 1, data); - } - - default: - /* not reached */ - return -1; - } -} - -char *json_dumps(const json_t *json, size_t flags) { - strbuffer_t strbuff; - char *result; - - if (strbuffer_init(&strbuff)) - return NULL; - - if (json_dump_callback(json, dump_to_strbuffer, (void *)&strbuff, flags)) - result = NULL; - else if (flags & JSON_EOL) - result = jsonp_eolstrsteal(&strbuff); - else - result = jsonp_strsteal(&strbuff); - - return result; -} - -size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags) { - struct buffer buf = {size, 0, buffer}; - - if (json_dump_callback(json, dump_to_buffer, (void *)&buf, flags)) - return 0; - - return buf.used; -} - -int json_dumpf(const json_t *json, FILE *output, size_t flags) { - return json_dump_callback(json, dump_to_file, (void *)output, flags); -} - -int json_dumpfd(const json_t *json, int output, size_t flags) { - return json_dump_callback(json, dump_to_fd, (void *)&output, flags); -} - -int json_dump_file(const json_t *json, const char *path, size_t flags) { - int result; - - FILE *output = fopen(path, "w"); - if (!output) - return -1; - - result = json_dumpf(json, output, flags); - - if (fclose(output) != 0) - return -1; - - return result; -} - -int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, - size_t flags) { - int res; - hashtable_t parents_set; - - if (!(flags & JSON_ENCODE_ANY)) { - if (!json_is_array(json) && !json_is_object(json)) - return -1; - } - - if (hashtable_init(&parents_set)) - return -1; - res = do_dump(json, flags, 0, &parents_set, callback, data); - hashtable_close(&parents_set); - - return res; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/error.c b/solo-ckpool-source/src/jansson-2.14/src/error.c deleted file mode 100644 index 14d0047..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/error.c +++ /dev/null @@ -1,59 +0,0 @@ -#include "jansson_private.h" -#include - -void jsonp_error_init(json_error_t *error, const char 
*source) { - if (error) { - error->text[0] = '\0'; - error->line = -1; - error->column = -1; - error->position = 0; - if (source) - jsonp_error_set_source(error, source); - else - error->source[0] = '\0'; - } -} - -void jsonp_error_set_source(json_error_t *error, const char *source) { - size_t length; - - if (!error || !source) - return; - - length = strlen(source); - if (length < JSON_ERROR_SOURCE_LENGTH) - strncpy(error->source, source, length + 1); - else { - size_t extra = length - JSON_ERROR_SOURCE_LENGTH + 4; - memcpy(error->source, "...", 3); - strncpy(error->source + 3, source + extra, length - extra + 1); - } -} - -void jsonp_error_set(json_error_t *error, int line, int column, size_t position, - enum json_error_code code, const char *msg, ...) { - va_list ap; - - va_start(ap, msg); - jsonp_error_vset(error, line, column, position, code, msg, ap); - va_end(ap); -} - -void jsonp_error_vset(json_error_t *error, int line, int column, size_t position, - enum json_error_code code, const char *msg, va_list ap) { - if (!error) - return; - - if (error->text[0] != '\0') { - /* error already set */ - return; - } - - error->line = line; - error->column = column; - error->position = (int)position; - - vsnprintf(error->text, JSON_ERROR_TEXT_LENGTH - 1, msg, ap); - error->text[JSON_ERROR_TEXT_LENGTH - 2] = '\0'; - error->text[JSON_ERROR_TEXT_LENGTH - 1] = code; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable.c b/solo-ckpool-source/src/jansson-2.14/src/hashtable.c deleted file mode 100644 index 1508d74..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/hashtable.c +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * This library is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#if HAVE_CONFIG_H -#include -#endif - -#include -#include - -#if HAVE_STDINT_H -#include -#endif - -#include "hashtable.h" -#include "jansson_private.h" /* for container_of() */ -#include /* for JSON_INLINE */ - -#ifndef INITIAL_HASHTABLE_ORDER -#define INITIAL_HASHTABLE_ORDER 3 -#endif - -typedef struct hashtable_list list_t; -typedef struct hashtable_pair pair_t; -typedef struct hashtable_bucket bucket_t; - -extern volatile uint32_t hashtable_seed; - -/* Implementation of the hash function */ -#include "lookup3.h" - -#define list_to_pair(list_) container_of(list_, pair_t, list) -#define ordered_list_to_pair(list_) container_of(list_, pair_t, ordered_list) -#define hash_str(key, len) ((size_t)hashlittle((key), len, hashtable_seed)) - -static JSON_INLINE void list_init(list_t *list) { - list->next = list; - list->prev = list; -} - -static JSON_INLINE void list_insert(list_t *list, list_t *node) { - node->next = list; - node->prev = list->prev; - list->prev->next = node; - list->prev = node; -} - -static JSON_INLINE void list_remove(list_t *list) { - list->prev->next = list->next; - list->next->prev = list->prev; -} - -static JSON_INLINE int bucket_is_empty(hashtable_t *hashtable, bucket_t *bucket) { - return bucket->first == &hashtable->list && bucket->first == bucket->last; -} - -static void insert_to_bucket(hashtable_t *hashtable, bucket_t *bucket, list_t *list) { - if (bucket_is_empty(hashtable, bucket)) { - list_insert(&hashtable->list, list); - bucket->first = bucket->last = list; - } else { - list_insert(bucket->first, list); - bucket->first = list; - } -} - -static pair_t *hashtable_find_pair(hashtable_t *hashtable, bucket_t *bucket, - const char *key, size_t key_len, size_t hash) { - list_t *list; - pair_t *pair; - - if (bucket_is_empty(hashtable, bucket)) - return NULL; - - list = bucket->first; - while (1) { - pair = list_to_pair(list); - if (pair->hash == hash && pair->key_len == key_len && - memcmp(pair->key, key, key_len) == 0) - return pair; - 
- if (list == bucket->last) - break; - - list = list->next; - } - - return NULL; -} - -/* returns 0 on success, -1 if key was not found */ -static int hashtable_do_del(hashtable_t *hashtable, const char *key, size_t key_len, - size_t hash) { - pair_t *pair; - bucket_t *bucket; - size_t index; - - index = hash & hashmask(hashtable->order); - bucket = &hashtable->buckets[index]; - - pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); - if (!pair) - return -1; - - if (&pair->list == bucket->first && &pair->list == bucket->last) - bucket->first = bucket->last = &hashtable->list; - - else if (&pair->list == bucket->first) - bucket->first = pair->list.next; - - else if (&pair->list == bucket->last) - bucket->last = pair->list.prev; - - list_remove(&pair->list); - list_remove(&pair->ordered_list); - json_decref(pair->value); - - jsonp_free(pair); - hashtable->size--; - - return 0; -} - -static void hashtable_do_clear(hashtable_t *hashtable) { - list_t *list, *next; - pair_t *pair; - - for (list = hashtable->list.next; list != &hashtable->list; list = next) { - next = list->next; - pair = list_to_pair(list); - json_decref(pair->value); - jsonp_free(pair); - } -} - -static int hashtable_do_rehash(hashtable_t *hashtable) { - list_t *list, *next; - pair_t *pair; - size_t i, index, new_size, new_order; - struct hashtable_bucket *new_buckets; - - new_order = hashtable->order + 1; - new_size = hashsize(new_order); - - new_buckets = jsonp_malloc(new_size * sizeof(bucket_t)); - if (!new_buckets) - return -1; - - jsonp_free(hashtable->buckets); - hashtable->buckets = new_buckets; - hashtable->order = new_order; - - for (i = 0; i < hashsize(hashtable->order); i++) { - hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; - } - - list = hashtable->list.next; - list_init(&hashtable->list); - - for (; list != &hashtable->list; list = next) { - next = list->next; - pair = list_to_pair(list); - index = pair->hash % new_size; - 
insert_to_bucket(hashtable, &hashtable->buckets[index], &pair->list); - } - - return 0; -} - -int hashtable_init(hashtable_t *hashtable) { - size_t i; - - hashtable->size = 0; - hashtable->order = INITIAL_HASHTABLE_ORDER; - hashtable->buckets = jsonp_malloc(hashsize(hashtable->order) * sizeof(bucket_t)); - if (!hashtable->buckets) - return -1; - - list_init(&hashtable->list); - list_init(&hashtable->ordered_list); - - for (i = 0; i < hashsize(hashtable->order); i++) { - hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; - } - - return 0; -} - -void hashtable_close(hashtable_t *hashtable) { - hashtable_do_clear(hashtable); - jsonp_free(hashtable->buckets); -} - -static pair_t *init_pair(json_t *value, const char *key, size_t key_len, size_t hash) { - pair_t *pair; - - /* offsetof(...) returns the size of pair_t without the last, - flexible member. This way, the correct amount is - allocated. */ - - if (key_len >= (size_t)-1 - offsetof(pair_t, key)) { - /* Avoid an overflow if the key is very long */ - return NULL; - } - - pair = jsonp_malloc(offsetof(pair_t, key) + key_len + 1); - - if (!pair) - return NULL; - - pair->hash = hash; - memcpy(pair->key, key, key_len); - pair->key[key_len] = '\0'; - pair->key_len = key_len; - pair->value = value; - - list_init(&pair->list); - list_init(&pair->ordered_list); - - return pair; -} - -int hashtable_set(hashtable_t *hashtable, const char *key, size_t key_len, - json_t *value) { - pair_t *pair; - bucket_t *bucket; - size_t hash, index; - - /* rehash if the load ratio exceeds 1 */ - if (hashtable->size >= hashsize(hashtable->order)) - if (hashtable_do_rehash(hashtable)) - return -1; - - hash = hash_str(key, key_len); - index = hash & hashmask(hashtable->order); - bucket = &hashtable->buckets[index]; - pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); - - if (pair) { - json_decref(pair->value); - pair->value = value; - } else { - pair = init_pair(value, key, key_len, hash); - - if 
(!pair) - return -1; - - insert_to_bucket(hashtable, bucket, &pair->list); - list_insert(&hashtable->ordered_list, &pair->ordered_list); - - hashtable->size++; - } - return 0; -} - -void *hashtable_get(hashtable_t *hashtable, const char *key, size_t key_len) { - pair_t *pair; - size_t hash; - bucket_t *bucket; - - hash = hash_str(key, key_len); - bucket = &hashtable->buckets[hash & hashmask(hashtable->order)]; - - pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); - if (!pair) - return NULL; - - return pair->value; -} - -int hashtable_del(hashtable_t *hashtable, const char *key, size_t key_len) { - size_t hash = hash_str(key, key_len); - return hashtable_do_del(hashtable, key, key_len, hash); -} - -void hashtable_clear(hashtable_t *hashtable) { - size_t i; - - hashtable_do_clear(hashtable); - - for (i = 0; i < hashsize(hashtable->order); i++) { - hashtable->buckets[i].first = hashtable->buckets[i].last = &hashtable->list; - } - - list_init(&hashtable->list); - list_init(&hashtable->ordered_list); - hashtable->size = 0; -} - -void *hashtable_iter(hashtable_t *hashtable) { - return hashtable_iter_next(hashtable, &hashtable->ordered_list); -} - -void *hashtable_iter_at(hashtable_t *hashtable, const char *key, size_t key_len) { - pair_t *pair; - size_t hash; - bucket_t *bucket; - - hash = hash_str(key, key_len); - bucket = &hashtable->buckets[hash & hashmask(hashtable->order)]; - - pair = hashtable_find_pair(hashtable, bucket, key, key_len, hash); - if (!pair) - return NULL; - - return &pair->ordered_list; -} - -void *hashtable_iter_next(hashtable_t *hashtable, void *iter) { - list_t *list = (list_t *)iter; - if (list->next == &hashtable->ordered_list) - return NULL; - return list->next; -} - -void *hashtable_iter_key(void *iter) { - pair_t *pair = ordered_list_to_pair((list_t *)iter); - return pair->key; -} - -size_t hashtable_iter_key_len(void *iter) { - pair_t *pair = ordered_list_to_pair((list_t *)iter); - return pair->key_len; -} - -void 
*hashtable_iter_value(void *iter) { - pair_t *pair = ordered_list_to_pair((list_t *)iter); - return pair->value; -} - -void hashtable_iter_set(void *iter, json_t *value) { - pair_t *pair = ordered_list_to_pair((list_t *)iter); - - json_decref(pair->value); - pair->value = value; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable.h b/solo-ckpool-source/src/jansson-2.14/src/hashtable.h deleted file mode 100644 index 03a1f5a..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/hashtable.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * This library is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#ifndef HASHTABLE_H -#define HASHTABLE_H - -#include "jansson.h" -#include - -struct hashtable_list { - struct hashtable_list *prev; - struct hashtable_list *next; -}; - -/* "pair" may be a bit confusing a name, but think of it as a - key-value pair. In this case, it just encodes some extra data, - too */ -struct hashtable_pair { - struct hashtable_list list; - struct hashtable_list ordered_list; - size_t hash; - json_t *value; - size_t key_len; - char key[1]; -}; - -struct hashtable_bucket { - struct hashtable_list *first; - struct hashtable_list *last; -}; - -typedef struct hashtable { - size_t size; - struct hashtable_bucket *buckets; - size_t order; /* hashtable has pow(2, order) buckets */ - struct hashtable_list list; - struct hashtable_list ordered_list; -} hashtable_t; - -#define hashtable_key_to_iter(key_) \ - (&(container_of(key_, struct hashtable_pair, key)->ordered_list)) - -/** - * hashtable_init - Initialize a hashtable object - * - * @hashtable: The (statically allocated) hashtable object - * - * Initializes a statically allocated hashtable object. The object - * should be cleared with hashtable_close when it's no longer used. - * - * Returns 0 on success, -1 on error (out of memory). 
- */ -int hashtable_init(hashtable_t *hashtable) JANSSON_ATTRS((warn_unused_result)); - -/** - * hashtable_close - Release all resources used by a hashtable object - * - * @hashtable: The hashtable - * - * Destroys a statically allocated hashtable object. - */ -void hashtable_close(hashtable_t *hashtable); - -/** - * hashtable_set - Add/modify value in hashtable - * - * @hashtable: The hashtable object - * @key: The key - * @key: The length of key - * @serial: For addition order of keys - * @value: The value - * - * If a value with the given key already exists, its value is replaced - * with the new value. Value is "stealed" in the sense that hashtable - * doesn't increment its refcount but decreases the refcount when the - * value is no longer needed. - * - * Returns 0 on success, -1 on failure (out of memory). - */ -int hashtable_set(hashtable_t *hashtable, const char *key, size_t key_len, json_t *value); - -/** - * hashtable_get - Get a value associated with a key - * - * @hashtable: The hashtable object - * @key: The key - * @key: The length of key - * - * Returns value if it is found, or NULL otherwise. - */ -void *hashtable_get(hashtable_t *hashtable, const char *key, size_t key_len); - -/** - * hashtable_del - Remove a value from the hashtable - * - * @hashtable: The hashtable object - * @key: The key - * @key: The length of key - * - * Returns 0 on success, or -1 if the key was not found. - */ -int hashtable_del(hashtable_t *hashtable, const char *key, size_t key_len); - -/** - * hashtable_clear - Clear hashtable - * - * @hashtable: The hashtable object - * - * Removes all items from the hashtable. - */ -void hashtable_clear(hashtable_t *hashtable); - -/** - * hashtable_iter - Iterate over hashtable - * - * @hashtable: The hashtable object - * - * Returns an opaque iterator to the first element in the hashtable. - * The iterator should be passed to hashtable_iter_* functions. - * The hashtable items are not iterated over in any particular order. 
- * - * There's no need to free the iterator in any way. The iterator is - * valid as long as the item that is referenced by the iterator is not - * deleted. Other values may be added or deleted. In particular, - * hashtable_iter_next() may be called on an iterator, and after that - * the key/value pair pointed by the old iterator may be deleted. - */ -void *hashtable_iter(hashtable_t *hashtable); - -/** - * hashtable_iter_at - Return an iterator at a specific key - * - * @hashtable: The hashtable object - * @key: The key that the iterator should point to - * @key: The length of key - * - * Like hashtable_iter() but returns an iterator pointing to a - * specific key. - */ -void *hashtable_iter_at(hashtable_t *hashtable, const char *key, size_t key_len); - -/** - * hashtable_iter_next - Advance an iterator - * - * @hashtable: The hashtable object - * @iter: The iterator - * - * Returns a new iterator pointing to the next element in the - * hashtable or NULL if the whole hastable has been iterated over. 
- */ -void *hashtable_iter_next(hashtable_t *hashtable, void *iter); - -/** - * hashtable_iter_key - Retrieve the key pointed by an iterator - * - * @iter: The iterator - */ -void *hashtable_iter_key(void *iter); - -/** - * hashtable_iter_key_len - Retrieve the key length pointed by an iterator - * - * @iter: The iterator - */ -size_t hashtable_iter_key_len(void *iter); - -/** - * hashtable_iter_value - Retrieve the value pointed by an iterator - * - * @iter: The iterator - */ -void *hashtable_iter_value(void *iter); - -/** - * hashtable_iter_set - Set the value pointed by an iterator - * - * @iter: The iterator - * @value: The value to set - */ -void hashtable_iter_set(void *iter, json_t *value); - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c b/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c deleted file mode 100644 index d156b40..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/hashtable_seed.c +++ /dev/null @@ -1,277 +0,0 @@ -/* Generate sizeof(uint32_t) bytes of as random data as possible to seed - the hash function. -*/ - -#ifdef HAVE_CONFIG_H -#include -#endif - -#include -#include - -#ifdef HAVE_STDINT_H -#include -#endif - -#ifdef HAVE_FCNTL_H -#include -#endif - -#ifdef HAVE_SCHED_H -#include -#endif - -#ifdef HAVE_UNISTD_H -#include -#endif - -#ifdef HAVE_SYS_STAT_H -#include -#endif - -#ifdef HAVE_SYS_TIME_H -#include -#endif - -#ifdef HAVE_SYS_TYPES_H -#include -#endif - -#if defined(_WIN32) -/* For GetModuleHandle(), GetProcAddress() and GetCurrentProcessId() */ -#include -#endif - -#include "jansson.h" - -static uint32_t buf_to_uint32(char *data) { - size_t i; - uint32_t result = 0; - - for (i = 0; i < sizeof(uint32_t); i++) - result = (result << 8) | (unsigned char)data[i]; - - return result; -} - -/* /dev/urandom */ -#if !defined(_WIN32) && defined(USE_URANDOM) -static int seed_from_urandom(uint32_t *seed) { - /* Use unbuffered I/O if we have open(), close() and read(). 
Otherwise - fall back to fopen() */ - - char data[sizeof(uint32_t)]; - int ok; - -#if defined(HAVE_OPEN) && defined(HAVE_CLOSE) && defined(HAVE_READ) - int urandom; - urandom = open("/dev/urandom", O_RDONLY); - if (urandom == -1) - return 1; - - ok = read(urandom, data, sizeof(uint32_t)) == sizeof(uint32_t); - close(urandom); -#else - FILE *urandom; - - urandom = fopen("/dev/urandom", "rb"); - if (!urandom) - return 1; - - ok = fread(data, 1, sizeof(uint32_t), urandom) == sizeof(uint32_t); - fclose(urandom); -#endif - - if (!ok) - return 1; - - *seed = buf_to_uint32(data); - return 0; -} -#endif - -/* Windows Crypto API */ -#if defined(_WIN32) && defined(USE_WINDOWS_CRYPTOAPI) -#include - -typedef BOOL(WINAPI *CRYPTACQUIRECONTEXTA)(HCRYPTPROV *phProv, LPCSTR pszContainer, - LPCSTR pszProvider, DWORD dwProvType, - DWORD dwFlags); -typedef BOOL(WINAPI *CRYPTGENRANDOM)(HCRYPTPROV hProv, DWORD dwLen, BYTE *pbBuffer); -typedef BOOL(WINAPI *CRYPTRELEASECONTEXT)(HCRYPTPROV hProv, DWORD dwFlags); - -static int seed_from_windows_cryptoapi(uint32_t *seed) { - HINSTANCE hAdvAPI32 = NULL; - CRYPTACQUIRECONTEXTA pCryptAcquireContext = NULL; - CRYPTGENRANDOM pCryptGenRandom = NULL; - CRYPTRELEASECONTEXT pCryptReleaseContext = NULL; - HCRYPTPROV hCryptProv = 0; - BYTE data[sizeof(uint32_t)]; - int ok; - - hAdvAPI32 = GetModuleHandle(TEXT("advapi32.dll")); - if (hAdvAPI32 == NULL) - return 1; - - pCryptAcquireContext = - (CRYPTACQUIRECONTEXTA)GetProcAddress(hAdvAPI32, "CryptAcquireContextA"); - if (!pCryptAcquireContext) - return 1; - - pCryptGenRandom = (CRYPTGENRANDOM)GetProcAddress(hAdvAPI32, "CryptGenRandom"); - if (!pCryptGenRandom) - return 1; - - pCryptReleaseContext = - (CRYPTRELEASECONTEXT)GetProcAddress(hAdvAPI32, "CryptReleaseContext"); - if (!pCryptReleaseContext) - return 1; - - if (!pCryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL, - CRYPT_VERIFYCONTEXT)) - return 1; - - ok = pCryptGenRandom(hCryptProv, sizeof(uint32_t), data); - 
pCryptReleaseContext(hCryptProv, 0); - - if (!ok) - return 1; - - *seed = buf_to_uint32((char *)data); - return 0; -} -#endif - -/* gettimeofday() and getpid() */ -static int seed_from_timestamp_and_pid(uint32_t *seed) { -#ifdef HAVE_GETTIMEOFDAY - /* XOR of seconds and microseconds */ - struct timeval tv; - gettimeofday(&tv, NULL); - *seed = (uint32_t)tv.tv_sec ^ (uint32_t)tv.tv_usec; -#else - /* Seconds only */ - *seed = (uint32_t)time(NULL); -#endif - - /* XOR with PID for more randomness */ -#if defined(_WIN32) - *seed ^= (uint32_t)GetCurrentProcessId(); -#elif defined(HAVE_GETPID) - *seed ^= (uint32_t)getpid(); -#endif - - return 0; -} - -static uint32_t generate_seed() { - uint32_t seed = 0; - int done = 0; - -#if !defined(_WIN32) && defined(USE_URANDOM) - if (seed_from_urandom(&seed) == 0) - done = 1; -#endif - -#if defined(_WIN32) && defined(USE_WINDOWS_CRYPTOAPI) - if (seed_from_windows_cryptoapi(&seed) == 0) - done = 1; -#endif - - if (!done) { - /* Fall back to timestamp and PID if no better randomness is - available */ - seed_from_timestamp_and_pid(&seed); - } - - /* Make sure the seed is never zero */ - if (seed == 0) - seed = 1; - - return seed; -} - -volatile uint32_t hashtable_seed = 0; - -#if defined(HAVE_ATOMIC_BUILTINS) && (defined(HAVE_SCHED_YIELD) || !defined(_WIN32)) -static volatile char seed_initialized = 0; - -void json_object_seed(size_t seed) { - uint32_t new_seed = (uint32_t)seed; - - if (hashtable_seed == 0) { - if (__atomic_test_and_set(&seed_initialized, __ATOMIC_RELAXED) == 0) { - /* Do the seeding ourselves */ - if (new_seed == 0) - new_seed = generate_seed(); - - __atomic_store_n(&hashtable_seed, new_seed, __ATOMIC_RELEASE); - } else { - /* Wait for another thread to do the seeding */ - do { -#ifdef HAVE_SCHED_YIELD - sched_yield(); -#endif - } while (__atomic_load_n(&hashtable_seed, __ATOMIC_ACQUIRE) == 0); - } - } -} -#elif defined(HAVE_SYNC_BUILTINS) && (defined(HAVE_SCHED_YIELD) || !defined(_WIN32)) -void 
json_object_seed(size_t seed) { - uint32_t new_seed = (uint32_t)seed; - - if (hashtable_seed == 0) { - if (new_seed == 0) { - /* Explicit synchronization fences are not supported by the - __sync builtins, so every thread getting here has to - generate the seed value. - */ - new_seed = generate_seed(); - } - - do { - if (__sync_bool_compare_and_swap(&hashtable_seed, 0, new_seed)) { - /* We were the first to seed */ - break; - } else { - /* Wait for another thread to do the seeding */ -#ifdef HAVE_SCHED_YIELD - sched_yield(); -#endif - } - } while (hashtable_seed == 0); - } -} -#elif defined(_WIN32) -static long seed_initialized = 0; -void json_object_seed(size_t seed) { - uint32_t new_seed = (uint32_t)seed; - - if (hashtable_seed == 0) { - if (InterlockedIncrement(&seed_initialized) == 1) { - /* Do the seeding ourselves */ - if (new_seed == 0) - new_seed = generate_seed(); - - hashtable_seed = new_seed; - } else { - /* Wait for another thread to do the seeding */ - do { - SwitchToThread(); - } while (hashtable_seed == 0); - } - } -} -#else -/* Fall back to a thread-unsafe version */ -void json_object_seed(size_t seed) { - uint32_t new_seed = (uint32_t)seed; - - if (hashtable_seed == 0) { - if (new_seed == 0) - new_seed = generate_seed(); - - hashtable_seed = new_seed; - } -} -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson.def b/solo-ckpool-source/src/jansson-2.14/src/jansson.def deleted file mode 100644 index 5c76c2f..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/jansson.def +++ /dev/null @@ -1,83 +0,0 @@ -EXPORTS - json_delete - json_true - json_false - json_null - json_sprintf - json_vsprintf - json_string - json_stringn - json_string_nocheck - json_stringn_nocheck - json_string_value - json_string_length - json_string_set - json_string_setn - json_string_set_nocheck - json_string_setn_nocheck - json_integer - json_integer_value - json_integer_set - json_real - json_real_value - json_real_set - json_number_value - json_array - 
json_array_size - json_array_get - json_array_set_new - json_array_append_new - json_array_insert_new - json_array_remove - json_array_clear - json_array_extend - json_object - json_object_size - json_object_get - json_object_getn - json_object_set_new - json_object_setn_new - json_object_set_new_nocheck - json_object_setn_new_nocheck - json_object_del - json_object_deln - json_object_clear - json_object_update - json_object_update_existing - json_object_update_missing - json_object_update_recursive - json_object_iter - json_object_iter_at - json_object_iter_next - json_object_iter_key - json_object_iter_key_len - json_object_iter_value - json_object_iter_set_new - json_object_key_to_iter - json_object_seed - json_dumps - json_dumpb - json_dumpf - json_dumpfd - json_dump_file - json_dump_callback - json_loads - json_loadb - json_loadf - json_loadfd - json_load_file - json_load_callback - json_equal - json_copy - json_deep_copy - json_pack - json_pack_ex - json_vpack_ex - json_unpack - json_unpack_ex - json_vunpack_ex - json_set_alloc_funcs - json_get_alloc_funcs - jansson_version_str - jansson_version_cmp - diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson.h b/solo-ckpool-source/src/jansson-2.14/src/jansson.h deleted file mode 100644 index ddc3598..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/jansson.h +++ /dev/null @@ -1,422 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef JANSSON_H -#define JANSSON_H - -#include -#include -#include /* for size_t */ - -#include "jansson_config.h" - -#ifdef __cplusplus -extern "C" { -#endif - -/* version */ - -#define JANSSON_MAJOR_VERSION 2 -#define JANSSON_MINOR_VERSION 14 -#define JANSSON_MICRO_VERSION 0 - -/* Micro version is omitted if it's 0 */ -#define JANSSON_VERSION "2.14" - -/* Version as a 3-byte hex number, e.g. 0x010201 == 1.2.1. Use this - for numeric comparisons, e.g. #if JANSSON_VERSION_HEX >= ... */ -#define JANSSON_VERSION_HEX \ - ((JANSSON_MAJOR_VERSION << 16) | (JANSSON_MINOR_VERSION << 8) | \ - (JANSSON_MICRO_VERSION << 0)) - -/* If __atomic or __sync builtins are available the library is thread - * safe for all read-only functions plus reference counting. */ -#if JSON_HAVE_ATOMIC_BUILTINS || JSON_HAVE_SYNC_BUILTINS -#define JANSSON_THREAD_SAFE_REFCOUNT 1 -#endif - -#if defined(__GNUC__) || defined(__clang__) -#define JANSSON_ATTRS(x) __attribute__(x) -#else -#define JANSSON_ATTRS(x) -#endif - -/* types */ - -typedef enum { - JSON_OBJECT, - JSON_ARRAY, - JSON_STRING, - JSON_INTEGER, - JSON_REAL, - JSON_TRUE, - JSON_FALSE, - JSON_NULL -} json_type; - -typedef struct json_t { - json_type type; - volatile size_t refcount; -} json_t; - -#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ -#if JSON_INTEGER_IS_LONG_LONG -#ifdef _WIN32 -#define JSON_INTEGER_FORMAT "I64d" -#else -#define JSON_INTEGER_FORMAT "lld" -#endif -typedef long long json_int_t; -#else -#define JSON_INTEGER_FORMAT "ld" -typedef long json_int_t; -#endif /* JSON_INTEGER_IS_LONG_LONG */ -#endif - -#define json_typeof(json) ((json)->type) -#define json_is_object(json) ((json) && json_typeof(json) == JSON_OBJECT) -#define json_is_array(json) ((json) && json_typeof(json) == JSON_ARRAY) -#define json_is_string(json) ((json) && json_typeof(json) == JSON_STRING) -#define json_is_integer(json) ((json) && json_typeof(json) == JSON_INTEGER) -#define json_is_real(json) ((json) && json_typeof(json) == 
JSON_REAL) -#define json_is_number(json) (json_is_integer(json) || json_is_real(json)) -#define json_is_true(json) ((json) && json_typeof(json) == JSON_TRUE) -#define json_is_false(json) ((json) && json_typeof(json) == JSON_FALSE) -#define json_boolean_value json_is_true -#define json_is_boolean(json) (json_is_true(json) || json_is_false(json)) -#define json_is_null(json) ((json) && json_typeof(json) == JSON_NULL) - -/* construction, destruction, reference counting */ - -json_t *json_object(void); -json_t *json_array(void); -json_t *json_string(const char *value); -json_t *json_stringn(const char *value, size_t len); -json_t *json_string_nocheck(const char *value); -json_t *json_stringn_nocheck(const char *value, size_t len); -json_t *json_integer(json_int_t value); -json_t *json_real(double value); -json_t *json_true(void); -json_t *json_false(void); -#define json_boolean(val) ((val) ? json_true() : json_false()) -json_t *json_null(void); - -/* do not call JSON_INTERNAL_INCREF or JSON_INTERNAL_DECREF directly */ -#if JSON_HAVE_ATOMIC_BUILTINS -#define JSON_INTERNAL_INCREF(json) \ - __atomic_add_fetch(&json->refcount, 1, __ATOMIC_ACQUIRE) -#define JSON_INTERNAL_DECREF(json) \ - __atomic_sub_fetch(&json->refcount, 1, __ATOMIC_RELEASE) -#elif JSON_HAVE_SYNC_BUILTINS -#define JSON_INTERNAL_INCREF(json) __sync_add_and_fetch(&json->refcount, 1) -#define JSON_INTERNAL_DECREF(json) __sync_sub_and_fetch(&json->refcount, 1) -#else -#define JSON_INTERNAL_INCREF(json) (++json->refcount) -#define JSON_INTERNAL_DECREF(json) (--json->refcount) -#endif - -static JSON_INLINE json_t *json_incref(json_t *json) { - if (json && json->refcount != (size_t)-1) - JSON_INTERNAL_INCREF(json); - return json; -} - -/* do not call json_delete directly */ -void json_delete(json_t *json); - -static JSON_INLINE void json_decref(json_t *json) { - if (json && json->refcount != (size_t)-1 && JSON_INTERNAL_DECREF(json) == 0) - json_delete(json); -} - -#if defined(__GNUC__) || defined(__clang__) 
-static JSON_INLINE void json_decrefp(json_t **json) { - if (json) { - json_decref(*json); - *json = NULL; - } -} - -#define json_auto_t json_t __attribute__((cleanup(json_decrefp))) -#endif - -/* error reporting */ - -#define JSON_ERROR_TEXT_LENGTH 160 -#define JSON_ERROR_SOURCE_LENGTH 80 - -typedef struct json_error_t { - int line; - int column; - int position; - char source[JSON_ERROR_SOURCE_LENGTH]; - char text[JSON_ERROR_TEXT_LENGTH]; -} json_error_t; - -enum json_error_code { - json_error_unknown, - json_error_out_of_memory, - json_error_stack_overflow, - json_error_cannot_open_file, - json_error_invalid_argument, - json_error_invalid_utf8, - json_error_premature_end_of_input, - json_error_end_of_input_expected, - json_error_invalid_syntax, - json_error_invalid_format, - json_error_wrong_type, - json_error_null_character, - json_error_null_value, - json_error_null_byte_in_key, - json_error_duplicate_key, - json_error_numeric_overflow, - json_error_item_not_found, - json_error_index_out_of_range -}; - -static JSON_INLINE enum json_error_code json_error_code(const json_error_t *e) { - return (enum json_error_code)e->text[JSON_ERROR_TEXT_LENGTH - 1]; -} - -/* getters, setters, manipulation */ - -void json_object_seed(size_t seed); -size_t json_object_size(const json_t *object); -json_t *json_object_get(const json_t *object, const char *key) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_object_getn(const json_t *object, const char *key, size_t key_len) - JANSSON_ATTRS((warn_unused_result)); -int json_object_set_new(json_t *object, const char *key, json_t *value); -int json_object_setn_new(json_t *object, const char *key, size_t key_len, json_t *value); -int json_object_set_new_nocheck(json_t *object, const char *key, json_t *value); -int json_object_setn_new_nocheck(json_t *object, const char *key, size_t key_len, - json_t *value); -int json_object_del(json_t *object, const char *key); -int json_object_deln(json_t *object, const char *key, size_t key_len); 
-int json_object_clear(json_t *object); -int json_object_update(json_t *object, json_t *other); -int json_object_update_existing(json_t *object, json_t *other); -int json_object_update_missing(json_t *object, json_t *other); -int json_object_update_recursive(json_t *object, json_t *other); -void *json_object_iter(json_t *object); -void *json_object_iter_at(json_t *object, const char *key); -void *json_object_key_to_iter(const char *key); -void *json_object_iter_next(json_t *object, void *iter); -const char *json_object_iter_key(void *iter); -size_t json_object_iter_key_len(void *iter); -json_t *json_object_iter_value(void *iter); -int json_object_iter_set_new(json_t *object, void *iter, json_t *value); - -#define json_object_foreach(object, key, value) \ - for (key = json_object_iter_key(json_object_iter(object)); \ - key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ - key = json_object_iter_key( \ - json_object_iter_next(object, json_object_key_to_iter(key)))) - -#define json_object_keylen_foreach(object, key, key_len, value) \ - for (key = json_object_iter_key(json_object_iter(object)), \ - key_len = json_object_iter_key_len(json_object_key_to_iter(key)); \ - key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ - key = json_object_iter_key( \ - json_object_iter_next(object, json_object_key_to_iter(key))), \ - key_len = json_object_iter_key_len(json_object_key_to_iter(key))) - -#define json_object_foreach_safe(object, n, key, value) \ - for (key = json_object_iter_key(json_object_iter(object)), \ - n = json_object_iter_next(object, json_object_key_to_iter(key)); \ - key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ - key = json_object_iter_key(n), \ - n = json_object_iter_next(object, json_object_key_to_iter(key))) - -#define json_object_keylen_foreach_safe(object, n, key, key_len, value) \ - for (key = json_object_iter_key(json_object_iter(object)), \ - n = json_object_iter_next(object, 
json_object_key_to_iter(key)), \ - key_len = json_object_iter_key_len(json_object_key_to_iter(key)); \ - key && (value = json_object_iter_value(json_object_key_to_iter(key))); \ - key = json_object_iter_key(n), key_len = json_object_iter_key_len(n), \ - n = json_object_iter_next(object, json_object_key_to_iter(key))) - -#define json_array_foreach(array, index, value) \ - for (index = 0; \ - index < json_array_size(array) && (value = json_array_get(array, index)); \ - index++) - -static JSON_INLINE int json_object_set(json_t *object, const char *key, json_t *value) { - return json_object_set_new(object, key, json_incref(value)); -} - -static JSON_INLINE int json_object_setn(json_t *object, const char *key, size_t key_len, - json_t *value) { - return json_object_setn_new(object, key, key_len, json_incref(value)); -} - -static JSON_INLINE int json_object_set_nocheck(json_t *object, const char *key, - json_t *value) { - return json_object_set_new_nocheck(object, key, json_incref(value)); -} - -static JSON_INLINE int json_object_setn_nocheck(json_t *object, const char *key, - size_t key_len, json_t *value) { - return json_object_setn_new_nocheck(object, key, key_len, json_incref(value)); -} - -static JSON_INLINE int json_object_iter_set(json_t *object, void *iter, json_t *value) { - return json_object_iter_set_new(object, iter, json_incref(value)); -} - -static JSON_INLINE int json_object_update_new(json_t *object, json_t *other) { - int ret = json_object_update(object, other); - json_decref(other); - return ret; -} - -static JSON_INLINE int json_object_update_existing_new(json_t *object, json_t *other) { - int ret = json_object_update_existing(object, other); - json_decref(other); - return ret; -} - -static JSON_INLINE int json_object_update_missing_new(json_t *object, json_t *other) { - int ret = json_object_update_missing(object, other); - json_decref(other); - return ret; -} - -size_t json_array_size(const json_t *array); -json_t *json_array_get(const json_t *array, 
size_t index) - JANSSON_ATTRS((warn_unused_result)); -int json_array_set_new(json_t *array, size_t index, json_t *value); -int json_array_append_new(json_t *array, json_t *value); -int json_array_insert_new(json_t *array, size_t index, json_t *value); -int json_array_remove(json_t *array, size_t index); -int json_array_clear(json_t *array); -int json_array_extend(json_t *array, json_t *other); - -static JSON_INLINE int json_array_set(json_t *array, size_t ind, json_t *value) { - return json_array_set_new(array, ind, json_incref(value)); -} - -static JSON_INLINE int json_array_append(json_t *array, json_t *value) { - return json_array_append_new(array, json_incref(value)); -} - -static JSON_INLINE int json_array_insert(json_t *array, size_t ind, json_t *value) { - return json_array_insert_new(array, ind, json_incref(value)); -} - -const char *json_string_value(const json_t *string); -size_t json_string_length(const json_t *string); -json_int_t json_integer_value(const json_t *integer); -double json_real_value(const json_t *real); -double json_number_value(const json_t *json); - -int json_string_set(json_t *string, const char *value); -int json_string_setn(json_t *string, const char *value, size_t len); -int json_string_set_nocheck(json_t *string, const char *value); -int json_string_setn_nocheck(json_t *string, const char *value, size_t len); -int json_integer_set(json_t *integer, json_int_t value); -int json_real_set(json_t *real, double value); - -/* pack, unpack */ - -json_t *json_pack(const char *fmt, ...) JANSSON_ATTRS((warn_unused_result)); -json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) 
- JANSSON_ATTRS((warn_unused_result)); -json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) - JANSSON_ATTRS((warn_unused_result)); - -#define JSON_VALIDATE_ONLY 0x1 -#define JSON_STRICT 0x2 - -int json_unpack(json_t *root, const char *fmt, ...); -int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, ...); -int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, - va_list ap); - -/* sprintf */ - -json_t *json_sprintf(const char *fmt, ...) - JANSSON_ATTRS((warn_unused_result, format(printf, 1, 2))); -json_t *json_vsprintf(const char *fmt, va_list ap) - JANSSON_ATTRS((warn_unused_result, format(printf, 1, 0))); - -/* equality */ - -int json_equal(const json_t *value1, const json_t *value2); - -/* copying */ - -json_t *json_copy(json_t *value) JANSSON_ATTRS((warn_unused_result)); -json_t *json_deep_copy(const json_t *value) JANSSON_ATTRS((warn_unused_result)); - -/* decoding */ - -#define JSON_REJECT_DUPLICATES 0x1 -#define JSON_DISABLE_EOF_CHECK 0x2 -#define JSON_DECODE_ANY 0x4 -#define JSON_DECODE_INT_AS_REAL 0x8 -#define JSON_ALLOW_NUL 0x10 - -typedef size_t (*json_load_callback_t)(void *buffer, size_t buflen, void *data); - -json_t *json_loads(const char *input, size_t flags, json_error_t *error) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_loadfd(int input, size_t flags, json_error_t *error) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_load_file(const char *path, size_t flags, json_error_t *error) - JANSSON_ATTRS((warn_unused_result)); -json_t *json_load_callback(json_load_callback_t callback, void *data, size_t flags, - json_error_t *error) JANSSON_ATTRS((warn_unused_result)); - -/* encoding */ - -#define JSON_MAX_INDENT 
0x1F -#define JSON_INDENT(n) ((n)&JSON_MAX_INDENT) -#define JSON_COMPACT 0x20 -#define JSON_ENSURE_ASCII 0x40 -#define JSON_SORT_KEYS 0x80 -#define JSON_PRESERVE_ORDER 0x100 -#define JSON_ENCODE_ANY 0x200 -#define JSON_ESCAPE_SLASH 0x400 -#define JSON_REAL_PRECISION(n) (((n)&0x1F) << 11) -#define JSON_EMBED 0x10000 -#define JSON_NO_UTF8 0x20000 -#define JSON_EOL 0x40000 - -typedef int (*json_dump_callback_t)(const char *buffer, size_t size, void *data); - -char *json_dumps(const json_t *json, size_t flags) JANSSON_ATTRS((warn_unused_result)); -size_t json_dumpb(const json_t *json, char *buffer, size_t size, size_t flags); -int json_dumpf(const json_t *json, FILE *output, size_t flags); -int json_dumpfd(const json_t *json, int output, size_t flags); -int json_dump_file(const json_t *json, const char *path, size_t flags); -int json_dump_callback(const json_t *json, json_dump_callback_t callback, void *data, - size_t flags); - -/* custom memory allocation */ - -typedef void *(*json_malloc_t)(size_t); -typedef void (*json_free_t)(void *); - -void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn); -void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn); - -/* runtime version checking */ - -const char *jansson_version_str(void); -int jansson_version_cmp(int major, int minor, int micro); - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in b/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in deleted file mode 100644 index fe692ab..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/jansson_config.h.in +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2010-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- * - * - * This file specifies a part of the site-specific configuration for - * Jansson, namely those things that affect the public API in - * jansson.h. - * - * The configure script copies this file to jansson_config.h and - * replaces @var@ substitutions by values that fit your system. If you - * cannot run the configure script, you can do the value substitution - * by hand. - */ - -#ifndef JANSSON_CONFIG_H -#define JANSSON_CONFIG_H - -/* If your compiler supports the inline keyword in C, JSON_INLINE is - defined to `inline', otherwise empty. In C++, the inline is always - supported. */ -#ifdef __cplusplus -#define JSON_INLINE inline -#else -#define JSON_INLINE @json_inline@ -#endif - -/* If your compiler supports the `long long` type and the strtoll() - library function, JSON_INTEGER_IS_LONG_LONG is defined to 1, - otherwise to 0. */ -#define JSON_INTEGER_IS_LONG_LONG @json_have_long_long@ - -/* If locale.h and localeconv() are available, define to 1, - otherwise to 0. */ -#define JSON_HAVE_LOCALECONV @json_have_localeconv@ - -/* If __atomic builtins are available they will be used to manage - reference counts of json_t. */ -#define JSON_HAVE_ATOMIC_BUILTINS @json_have_atomic_builtins@ - -/* If __atomic builtins are not available we try using __sync builtins - to manage reference counts of json_t. */ -#define JSON_HAVE_SYNC_BUILTINS @json_have_sync_builtins@ - -/* Maximum recursion depth for parsing JSON input. - This limits the depth of e.g. array-within-array constructions. 
*/ -#define JSON_PARSER_MAX_DEPTH 2048 - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h b/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h deleted file mode 100644 index cccbbf5..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/jansson_private.h +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#ifndef JANSSON_PRIVATE_H -#define JANSSON_PRIVATE_H - -#include "hashtable.h" -#include "jansson.h" -#include "jansson_private_config.h" -#include "strbuffer.h" -#include - -#define container_of(ptr_, type_, member_) \ - ((type_ *)((char *)ptr_ - offsetof(type_, member_))) - -/* On some platforms, max() may already be defined */ -#ifndef max -#define max(a, b) ((a) > (b) ? (a) : (b)) -#endif - -/* va_copy is a C99 feature. In C89 implementations, it's sometimes - available as __va_copy. If not, memcpy() should do the trick. 
*/ -#ifndef va_copy -#ifdef __va_copy -#define va_copy __va_copy -#else -#define va_copy(a, b) memcpy(&(a), &(b), sizeof(va_list)) -#endif -#endif - -typedef struct { - json_t json; - hashtable_t hashtable; -} json_object_t; - -typedef struct { - json_t json; - size_t size; - size_t entries; - json_t **table; -} json_array_t; - -typedef struct { - json_t json; - char *value; - size_t length; -} json_string_t; - -typedef struct { - json_t json; - double value; -} json_real_t; - -typedef struct { - json_t json; - json_int_t value; -} json_integer_t; - -#define json_to_object(json_) container_of(json_, json_object_t, json) -#define json_to_array(json_) container_of(json_, json_array_t, json) -#define json_to_string(json_) container_of(json_, json_string_t, json) -#define json_to_real(json_) container_of(json_, json_real_t, json) -#define json_to_integer(json_) container_of(json_, json_integer_t, json) - -/* Create a string by taking ownership of an existing buffer */ -json_t *jsonp_stringn_nocheck_own(const char *value, size_t len); - -/* Error message formatting */ -void jsonp_error_init(json_error_t *error, const char *source); -void jsonp_error_set_source(json_error_t *error, const char *source); -void jsonp_error_set(json_error_t *error, int line, int column, size_t position, - enum json_error_code code, const char *msg, ...); -void jsonp_error_vset(json_error_t *error, int line, int column, size_t position, - enum json_error_code code, const char *msg, va_list ap); - -/* Locale independent string<->double conversions */ -int jsonp_strtod(strbuffer_t *strbuffer, double *out); -int jsonp_dtostr(char *buffer, size_t size, double value, int prec); - -/* Wrappers for custom memory functions */ -void *jsonp_malloc(size_t size) JANSSON_ATTRS((warn_unused_result)); -void _jsonp_free(void **ptr); -#define jsonp_free(ptr) _jsonp_free((void *)&(ptr)) - -char *jsonp_strndup(const char *str, size_t length) JANSSON_ATTRS((warn_unused_result)); -char *jsonp_strdup(const char 
*str) JANSSON_ATTRS((warn_unused_result)); -char *jsonp_strsteal(strbuffer_t *strbuff); -char *jsonp_eolstrsteal(strbuffer_t *strbuff); - -/* Circular reference check*/ -/* Space for "0x", double the sizeof a pointer for the hex and a terminator. */ -#define LOOP_KEY_LEN (2 + (sizeof(json_t *) * 2) + 1) -int jsonp_loop_check(hashtable_t *parents, const json_t *json, char *key, size_t key_size, - size_t *key_len_out); - -/* Windows compatibility */ -#if defined(_WIN32) || defined(WIN32) -#if defined(_MSC_VER) /* MS compiller */ -#if (_MSC_VER < 1900) && \ - !defined(snprintf) /* snprintf not defined yet & not introduced */ -#define snprintf _snprintf -#endif -#if (_MSC_VER < 1500) && \ - !defined(vsnprintf) /* vsnprintf not defined yet & not introduced */ -#define vsnprintf(b, c, f, a) _vsnprintf(b, c, f, a) -#endif -#else /* Other Windows compiller, old definition */ -#define snprintf _snprintf -#define vsnprintf _vsnprintf -#endif -#endif - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/load.c b/solo-ckpool-source/src/jansson-2.14/src/load.c deleted file mode 100644 index 8ae7abd..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/load.c +++ /dev/null @@ -1,1106 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#include "jansson_private.h" - -#include -#include -#include -#include -#include -#include -#ifdef HAVE_UNISTD_H -#include -#endif - -#include "jansson.h" -#include "strbuffer.h" -#include "utf.h" - -#define STREAM_STATE_OK 0 -#define STREAM_STATE_EOF -1 -#define STREAM_STATE_ERROR -2 - -#define TOKEN_INVALID -1 -#define TOKEN_EOF 0 -#define TOKEN_STRING 256 -#define TOKEN_INTEGER 257 -#define TOKEN_REAL 258 -#define TOKEN_TRUE 259 -#define TOKEN_FALSE 260 -#define TOKEN_NULL 261 - -/* Locale independent versions of isxxx() functions */ -#define l_isupper(c) ('A' <= (c) && (c) <= 'Z') -#define l_islower(c) ('a' <= (c) && (c) <= 'z') -#define l_isalpha(c) (l_isupper(c) || l_islower(c)) -#define l_isdigit(c) ('0' <= (c) && (c) <= '9') -#define l_isxdigit(c) \ - (l_isdigit(c) || ('A' <= (c) && (c) <= 'F') || ('a' <= (c) && (c) <= 'f')) - -/* Read one byte from stream, convert to unsigned char, then int, and - return. return EOF on end of file. This corresponds to the - behaviour of fgetc(). */ -typedef int (*get_func)(void *data); - -typedef struct { - get_func get; - void *data; - char buffer[5]; - size_t buffer_pos; - int state; - int line; - int column, last_column; - size_t position; -} stream_t; - -typedef struct { - stream_t stream; - strbuffer_t saved_text; - size_t flags; - size_t depth; - int token; - union { - struct { - char *val; - size_t len; - } string; - json_int_t integer; - double real; - } value; -} lex_t; - -#define stream_to_lex(stream) container_of(stream, lex_t, stream) - -/*** error reporting ***/ - -static void error_set(json_error_t *error, const lex_t *lex, enum json_error_code code, - const char *msg, ...) 
{ - va_list ap; - char msg_text[JSON_ERROR_TEXT_LENGTH]; - char msg_with_context[JSON_ERROR_TEXT_LENGTH]; - - int line = -1, col = -1; - size_t pos = 0; - const char *result = msg_text; - - if (!error) - return; - - va_start(ap, msg); - vsnprintf(msg_text, JSON_ERROR_TEXT_LENGTH, msg, ap); - msg_text[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; - va_end(ap); - - if (lex) { - const char *saved_text = strbuffer_value(&lex->saved_text); - - line = lex->stream.line; - col = lex->stream.column; - pos = lex->stream.position; - - if (saved_text && saved_text[0]) { - if (lex->saved_text.length <= 20) { - snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, "%s near '%s'", - msg_text, saved_text); - msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; - result = msg_with_context; - } - } else { - if (code == json_error_invalid_syntax) { - /* More specific error code for premature end of file. */ - code = json_error_premature_end_of_input; - } - if (lex->stream.state == STREAM_STATE_ERROR) { - /* No context for UTF-8 decoding errors */ - result = msg_text; - } else { - snprintf(msg_with_context, JSON_ERROR_TEXT_LENGTH, "%s near end of file", - msg_text); - msg_with_context[JSON_ERROR_TEXT_LENGTH - 1] = '\0'; - result = msg_with_context; - } - } - } - - jsonp_error_set(error, line, col, pos, code, "%s", result); -} - -/*** lexical analyzer ***/ - -static void stream_init(stream_t *stream, get_func get, void *data) { - stream->get = get; - stream->data = data; - stream->buffer[0] = '\0'; - stream->buffer_pos = 0; - - stream->state = STREAM_STATE_OK; - stream->line = 1; - stream->column = 0; - stream->position = 0; -} - -static int stream_get(stream_t *stream, json_error_t *error) { - int c; - - if (stream->state != STREAM_STATE_OK) - return stream->state; - - if (!stream->buffer[stream->buffer_pos]) { - c = stream->get(stream->data); - if (c == EOF) { - stream->state = STREAM_STATE_EOF; - return STREAM_STATE_EOF; - } - - stream->buffer[0] = c; - stream->buffer_pos = 0; - - if (0x80 <= c 
&& c <= 0xFF) { - /* multi-byte UTF-8 sequence */ - size_t i, count; - - count = utf8_check_first(c); - if (!count) - goto out; - - assert(count >= 2); - - for (i = 1; i < count; i++) - stream->buffer[i] = stream->get(stream->data); - - if (!utf8_check_full(stream->buffer, count, NULL)) - goto out; - - stream->buffer[count] = '\0'; - } else - stream->buffer[1] = '\0'; - } - - c = stream->buffer[stream->buffer_pos++]; - - stream->position++; - if (c == '\n') { - stream->line++; - stream->last_column = stream->column; - stream->column = 0; - } else if (utf8_check_first(c)) { - /* track the Unicode character column, so increment only if - this is the first character of a UTF-8 sequence */ - stream->column++; - } - - return c; - -out: - stream->state = STREAM_STATE_ERROR; - error_set(error, stream_to_lex(stream), json_error_invalid_utf8, - "unable to decode byte 0x%x", c); - return STREAM_STATE_ERROR; -} - -static void stream_unget(stream_t *stream, int c) { - if (c == STREAM_STATE_EOF || c == STREAM_STATE_ERROR) - return; - - stream->position--; - if (c == '\n') { - stream->line--; - stream->column = stream->last_column; - } else if (utf8_check_first(c)) - stream->column--; - - assert(stream->buffer_pos > 0); - stream->buffer_pos--; - assert(stream->buffer[stream->buffer_pos] == c); -} - -static int lex_get(lex_t *lex, json_error_t *error) { - return stream_get(&lex->stream, error); -} - -static void lex_save(lex_t *lex, int c) { strbuffer_append_byte(&lex->saved_text, c); } - -static int lex_get_save(lex_t *lex, json_error_t *error) { - int c = stream_get(&lex->stream, error); - if (c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) - lex_save(lex, c); - return c; -} - -static void lex_unget(lex_t *lex, int c) { stream_unget(&lex->stream, c); } - -static void lex_unget_unsave(lex_t *lex, int c) { - if (c != STREAM_STATE_EOF && c != STREAM_STATE_ERROR) { -/* Since we treat warnings as errors, when assertions are turned - * off the "d" variable would be set but never 
used. Which is - * treated as an error by GCC. - */ -#ifndef NDEBUG - char d; -#endif - stream_unget(&lex->stream, c); -#ifndef NDEBUG - d = -#endif - strbuffer_pop(&lex->saved_text); - assert(c == d); - } -} - -static void lex_save_cached(lex_t *lex) { - while (lex->stream.buffer[lex->stream.buffer_pos] != '\0') { - lex_save(lex, lex->stream.buffer[lex->stream.buffer_pos]); - lex->stream.buffer_pos++; - lex->stream.position++; - } -} - -static void lex_free_string(lex_t *lex) { - jsonp_free(lex->value.string.val); - lex->value.string.val = NULL; - lex->value.string.len = 0; -} - -/* assumes that str points to 'u' plus at least 4 valid hex digits */ -static int32_t decode_unicode_escape(const char *str) { - int i; - int32_t value = 0; - - assert(str[0] == 'u'); - - for (i = 1; i <= 4; i++) { - char c = str[i]; - value <<= 4; - if (l_isdigit(c)) - value += c - '0'; - else if (l_islower(c)) - value += c - 'a' + 10; - else if (l_isupper(c)) - value += c - 'A' + 10; - else - return -1; - } - - return value; -} - -static void lex_scan_string(lex_t *lex, json_error_t *error) { - int c; - const char *p; - char *t; - int i; - - lex->value.string.val = NULL; - lex->token = TOKEN_INVALID; - - c = lex_get_save(lex, error); - - while (c != '"') { - if (c == STREAM_STATE_ERROR) - goto out; - - else if (c == STREAM_STATE_EOF) { - error_set(error, lex, json_error_premature_end_of_input, - "premature end of input"); - goto out; - } - - else if (0 <= c && c <= 0x1F) { - /* control character */ - lex_unget_unsave(lex, c); - if (c == '\n') - error_set(error, lex, json_error_invalid_syntax, "unexpected newline"); - else - error_set(error, lex, json_error_invalid_syntax, "control character 0x%x", - c); - goto out; - } - - else if (c == '\\') { - c = lex_get_save(lex, error); - if (c == 'u') { - c = lex_get_save(lex, error); - for (i = 0; i < 4; i++) { - if (!l_isxdigit(c)) { - error_set(error, lex, json_error_invalid_syntax, - "invalid escape"); - goto out; - } - c = lex_get_save(lex, 
error); - } - } else if (c == '"' || c == '\\' || c == '/' || c == 'b' || c == 'f' || - c == 'n' || c == 'r' || c == 't') - c = lex_get_save(lex, error); - else { - error_set(error, lex, json_error_invalid_syntax, "invalid escape"); - goto out; - } - } else - c = lex_get_save(lex, error); - } - - /* the actual value is at most of the same length as the source - string, because: - - shortcut escapes (e.g. "\t") (length 2) are converted to 1 byte - - a single \uXXXX escape (length 6) is converted to at most 3 bytes - - two \uXXXX escapes (length 12) forming an UTF-16 surrogate pair - are converted to 4 bytes - */ - t = jsonp_malloc(lex->saved_text.length + 1); - if (!t) { - /* this is not very nice, since TOKEN_INVALID is returned */ - goto out; - } - lex->value.string.val = t; - - /* + 1 to skip the " */ - p = strbuffer_value(&lex->saved_text) + 1; - - while (*p != '"') { - if (*p == '\\') { - p++; - if (*p == 'u') { - size_t length; - int32_t value; - - value = decode_unicode_escape(p); - if (value < 0) { - error_set(error, lex, json_error_invalid_syntax, - "invalid Unicode escape '%.6s'", p - 1); - goto out; - } - p += 5; - - if (0xD800 <= value && value <= 0xDBFF) { - /* surrogate pair */ - if (*p == '\\' && *(p + 1) == 'u') { - int32_t value2 = decode_unicode_escape(++p); - if (value2 < 0) { - error_set(error, lex, json_error_invalid_syntax, - "invalid Unicode escape '%.6s'", p - 1); - goto out; - } - p += 5; - - if (0xDC00 <= value2 && value2 <= 0xDFFF) { - /* valid second surrogate */ - value = - ((value - 0xD800) << 10) + (value2 - 0xDC00) + 0x10000; - } else { - /* invalid second surrogate */ - error_set(error, lex, json_error_invalid_syntax, - "invalid Unicode '\\u%04X\\u%04X'", value, value2); - goto out; - } - } else { - /* no second surrogate */ - error_set(error, lex, json_error_invalid_syntax, - "invalid Unicode '\\u%04X'", value); - goto out; - } - } else if (0xDC00 <= value && value <= 0xDFFF) { - error_set(error, lex, json_error_invalid_syntax, - 
"invalid Unicode '\\u%04X'", value); - goto out; - } - - if (utf8_encode(value, t, &length)) - assert(0); - t += length; - } else { - switch (*p) { - case '"': - case '\\': - case '/': - *t = *p; - break; - case 'b': - *t = '\b'; - break; - case 'f': - *t = '\f'; - break; - case 'n': - *t = '\n'; - break; - case 'r': - *t = '\r'; - break; - case 't': - *t = '\t'; - break; - default: - assert(0); - } - t++; - p++; - } - } else - *(t++) = *(p++); - } - *t = '\0'; - lex->value.string.len = t - lex->value.string.val; - lex->token = TOKEN_STRING; - return; - -out: - lex_free_string(lex); -} - -#ifndef JANSSON_USING_CMAKE /* disabled if using cmake */ -#if JSON_INTEGER_IS_LONG_LONG -#ifdef _MSC_VER /* Microsoft Visual Studio */ -#define json_strtoint _strtoi64 -#else -#define json_strtoint strtoll -#endif -#else -#define json_strtoint strtol -#endif -#endif - -static int lex_scan_number(lex_t *lex, int c, json_error_t *error) { - const char *saved_text; - char *end; - double doubleval; - - lex->token = TOKEN_INVALID; - - if (c == '-') - c = lex_get_save(lex, error); - - if (c == '0') { - c = lex_get_save(lex, error); - if (l_isdigit(c)) { - lex_unget_unsave(lex, c); - goto out; - } - } else if (l_isdigit(c)) { - do - c = lex_get_save(lex, error); - while (l_isdigit(c)); - } else { - lex_unget_unsave(lex, c); - goto out; - } - - if (!(lex->flags & JSON_DECODE_INT_AS_REAL) && c != '.' 
&& c != 'E' && c != 'e') { - json_int_t intval; - - lex_unget_unsave(lex, c); - - saved_text = strbuffer_value(&lex->saved_text); - - errno = 0; - intval = json_strtoint(saved_text, &end, 10); - if (errno == ERANGE) { - if (intval < 0) - error_set(error, lex, json_error_numeric_overflow, - "too big negative integer"); - else - error_set(error, lex, json_error_numeric_overflow, "too big integer"); - goto out; - } - - assert(end == saved_text + lex->saved_text.length); - - lex->token = TOKEN_INTEGER; - lex->value.integer = intval; - return 0; - } - - if (c == '.') { - c = lex_get(lex, error); - if (!l_isdigit(c)) { - lex_unget(lex, c); - goto out; - } - lex_save(lex, c); - - do - c = lex_get_save(lex, error); - while (l_isdigit(c)); - } - - if (c == 'E' || c == 'e') { - c = lex_get_save(lex, error); - if (c == '+' || c == '-') - c = lex_get_save(lex, error); - - if (!l_isdigit(c)) { - lex_unget_unsave(lex, c); - goto out; - } - - do - c = lex_get_save(lex, error); - while (l_isdigit(c)); - } - - lex_unget_unsave(lex, c); - - if (jsonp_strtod(&lex->saved_text, &doubleval)) { - error_set(error, lex, json_error_numeric_overflow, "real number overflow"); - goto out; - } - - lex->token = TOKEN_REAL; - lex->value.real = doubleval; - return 0; - -out: - return -1; -} - -static int lex_scan(lex_t *lex, json_error_t *error) { - int c; - - strbuffer_clear(&lex->saved_text); - - if (lex->token == TOKEN_STRING) - lex_free_string(lex); - - do - c = lex_get(lex, error); - while (c == ' ' || c == '\t' || c == '\n' || c == '\r'); - - if (c == STREAM_STATE_EOF) { - lex->token = TOKEN_EOF; - goto out; - } - - if (c == STREAM_STATE_ERROR) { - lex->token = TOKEN_INVALID; - goto out; - } - - lex_save(lex, c); - - if (c == '{' || c == '}' || c == '[' || c == ']' || c == ':' || c == ',') - lex->token = c; - - else if (c == '"') - lex_scan_string(lex, error); - - else if (l_isdigit(c) || c == '-') { - if (lex_scan_number(lex, c, error)) - goto out; - } - - else if (l_isalpha(c)) { - /* eat 
up the whole identifier for clearer error messages */ - const char *saved_text; - - do - c = lex_get_save(lex, error); - while (l_isalpha(c)); - lex_unget_unsave(lex, c); - - saved_text = strbuffer_value(&lex->saved_text); - - if (strcmp(saved_text, "true") == 0) - lex->token = TOKEN_TRUE; - else if (strcmp(saved_text, "false") == 0) - lex->token = TOKEN_FALSE; - else if (strcmp(saved_text, "null") == 0) - lex->token = TOKEN_NULL; - else - lex->token = TOKEN_INVALID; - } - - else { - /* save the rest of the input UTF-8 sequence to get an error - message of valid UTF-8 */ - lex_save_cached(lex); - lex->token = TOKEN_INVALID; - } - -out: - return lex->token; -} - -static char *lex_steal_string(lex_t *lex, size_t *out_len) { - char *result = NULL; - if (lex->token == TOKEN_STRING) { - result = lex->value.string.val; - *out_len = lex->value.string.len; - lex->value.string.val = NULL; - lex->value.string.len = 0; - } - return result; -} - -static int lex_init(lex_t *lex, get_func get, size_t flags, void *data) { - stream_init(&lex->stream, get, data); - if (strbuffer_init(&lex->saved_text)) - return -1; - - lex->flags = flags; - lex->token = TOKEN_INVALID; - return 0; -} - -static void lex_close(lex_t *lex) { - if (lex->token == TOKEN_STRING) - lex_free_string(lex); - strbuffer_close(&lex->saved_text); -} - -/*** parser ***/ - -static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error); - -static json_t *parse_object(lex_t *lex, size_t flags, json_error_t *error) { - json_t *object = json_object(); - if (!object) - return NULL; - - lex_scan(lex, error); - if (lex->token == '}') - return object; - - while (1) { - char *key; - size_t len; - json_t *value; - - if (lex->token != TOKEN_STRING) { - error_set(error, lex, json_error_invalid_syntax, "string or '}' expected"); - goto error; - } - - key = lex_steal_string(lex, &len); - if (!key) - return NULL; - if (memchr(key, '\0', len)) { - jsonp_free(key); - error_set(error, lex, json_error_null_byte_in_key, - 
"NUL byte in object key not supported"); - goto error; - } - - if (flags & JSON_REJECT_DUPLICATES) { - if (json_object_getn(object, key, len)) { - jsonp_free(key); - error_set(error, lex, json_error_duplicate_key, "duplicate object key"); - goto error; - } - } - - lex_scan(lex, error); - if (lex->token != ':') { - jsonp_free(key); - error_set(error, lex, json_error_invalid_syntax, "':' expected"); - goto error; - } - - lex_scan(lex, error); - value = parse_value(lex, flags, error); - if (!value) { - jsonp_free(key); - goto error; - } - - if (json_object_setn_new_nocheck(object, key, len, value)) { - jsonp_free(key); - goto error; - } - - jsonp_free(key); - - lex_scan(lex, error); - if (lex->token != ',') - break; - - lex_scan(lex, error); - } - - if (lex->token != '}') { - error_set(error, lex, json_error_invalid_syntax, "'}' expected"); - goto error; - } - - return object; - -error: - json_decref(object); - return NULL; -} - -static json_t *parse_array(lex_t *lex, size_t flags, json_error_t *error) { - json_t *array = json_array(); - if (!array) - return NULL; - - lex_scan(lex, error); - if (lex->token == ']') - return array; - - while (lex->token) { - json_t *elem = parse_value(lex, flags, error); - if (!elem) - goto error; - - if (json_array_append_new(array, elem)) { - goto error; - } - - lex_scan(lex, error); - if (lex->token != ',') - break; - - lex_scan(lex, error); - } - - if (lex->token != ']') { - error_set(error, lex, json_error_invalid_syntax, "']' expected"); - goto error; - } - - return array; - -error: - json_decref(array); - return NULL; -} - -static json_t *parse_value(lex_t *lex, size_t flags, json_error_t *error) { - json_t *json; - - lex->depth++; - if (lex->depth > JSON_PARSER_MAX_DEPTH) { - error_set(error, lex, json_error_stack_overflow, "maximum parsing depth reached"); - return NULL; - } - - switch (lex->token) { - case TOKEN_STRING: { - const char *value = lex->value.string.val; - size_t len = lex->value.string.len; - - if (!(flags & 
JSON_ALLOW_NUL)) { - if (memchr(value, '\0', len)) { - error_set(error, lex, json_error_null_character, - "\\u0000 is not allowed without JSON_ALLOW_NUL"); - return NULL; - } - } - - json = jsonp_stringn_nocheck_own(value, len); - lex->value.string.val = NULL; - lex->value.string.len = 0; - break; - } - - case TOKEN_INTEGER: { - json = json_integer(lex->value.integer); - break; - } - - case TOKEN_REAL: { - json = json_real(lex->value.real); - break; - } - - case TOKEN_TRUE: - json = json_true(); - break; - - case TOKEN_FALSE: - json = json_false(); - break; - - case TOKEN_NULL: - json = json_null(); - break; - - case '{': - json = parse_object(lex, flags, error); - break; - - case '[': - json = parse_array(lex, flags, error); - break; - - case TOKEN_INVALID: - error_set(error, lex, json_error_invalid_syntax, "invalid token"); - return NULL; - - default: - error_set(error, lex, json_error_invalid_syntax, "unexpected token"); - return NULL; - } - - if (!json) - return NULL; - - lex->depth--; - return json; -} - -static json_t *parse_json(lex_t *lex, size_t flags, json_error_t *error) { - json_t *result; - - lex->depth = 0; - - lex_scan(lex, error); - if (!(flags & JSON_DECODE_ANY)) { - if (lex->token != '[' && lex->token != '{') { - error_set(error, lex, json_error_invalid_syntax, "'[' or '{' expected"); - return NULL; - } - } - - result = parse_value(lex, flags, error); - if (!result) - return NULL; - - if (!(flags & JSON_DISABLE_EOF_CHECK)) { - lex_scan(lex, error); - if (lex->token != TOKEN_EOF) { - error_set(error, lex, json_error_end_of_input_expected, - "end of file expected"); - json_decref(result); - return NULL; - } - } - - if (error) { - /* Save the position even though there was no error */ - error->position = (int)lex->stream.position; - } - - return result; -} - -typedef struct { - const char *data; - size_t pos; -} string_data_t; - -static int string_get(void *data) { - char c; - string_data_t *stream = (string_data_t *)data; - c = 
stream->data[stream->pos]; - if (c == '\0') - return EOF; - else { - stream->pos++; - return (unsigned char)c; - } -} - -json_t *json_loads(const char *string, size_t flags, json_error_t *error) { - lex_t lex; - json_t *result; - string_data_t stream_data; - - jsonp_error_init(error, ""); - - if (string == NULL) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - stream_data.data = string; - stream_data.pos = 0; - - if (lex_init(&lex, string_get, flags, (void *)&stream_data)) - return NULL; - - result = parse_json(&lex, flags, error); - - lex_close(&lex); - return result; -} - -typedef struct { - const char *data; - size_t len; - size_t pos; -} buffer_data_t; - -static int buffer_get(void *data) { - char c; - buffer_data_t *stream = data; - if (stream->pos >= stream->len) - return EOF; - - c = stream->data[stream->pos]; - stream->pos++; - return (unsigned char)c; -} - -json_t *json_loadb(const char *buffer, size_t buflen, size_t flags, json_error_t *error) { - lex_t lex; - json_t *result; - buffer_data_t stream_data; - - jsonp_error_init(error, ""); - - if (buffer == NULL) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - stream_data.data = buffer; - stream_data.pos = 0; - stream_data.len = buflen; - - if (lex_init(&lex, buffer_get, flags, (void *)&stream_data)) - return NULL; - - result = parse_json(&lex, flags, error); - - lex_close(&lex); - return result; -} - -json_t *json_loadf(FILE *input, size_t flags, json_error_t *error) { - lex_t lex; - const char *source; - json_t *result; - - if (input == stdin) - source = ""; - else - source = ""; - - jsonp_error_init(error, source); - - if (input == NULL) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - if (lex_init(&lex, (get_func)fgetc, flags, input)) - return NULL; - - result = parse_json(&lex, flags, error); - - lex_close(&lex); - return result; -} - -static int 
fd_get_func(int *fd) { -#ifdef HAVE_UNISTD_H - uint8_t c; - if (read(*fd, &c, 1) == 1) - return c; -#endif - return EOF; -} - -json_t *json_loadfd(int input, size_t flags, json_error_t *error) { - lex_t lex; - const char *source; - json_t *result; - -#ifdef HAVE_UNISTD_H - if (input == STDIN_FILENO) - source = ""; - else -#endif - source = ""; - - jsonp_error_init(error, source); - - if (input < 0) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - if (lex_init(&lex, (get_func)fd_get_func, flags, &input)) - return NULL; - - result = parse_json(&lex, flags, error); - - lex_close(&lex); - return result; -} - -json_t *json_load_file(const char *path, size_t flags, json_error_t *error) { - json_t *result; - FILE *fp; - - jsonp_error_init(error, path); - - if (path == NULL) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - fp = fopen(path, "rb"); - if (!fp) { - error_set(error, NULL, json_error_cannot_open_file, "unable to open %s: %s", path, - strerror(errno)); - return NULL; - } - - result = json_loadf(fp, flags, error); - - fclose(fp); - return result; -} - -#define MAX_BUF_LEN 1024 - -typedef struct { - char data[MAX_BUF_LEN]; - size_t len; - size_t pos; - json_load_callback_t callback; - void *arg; -} callback_data_t; - -static int callback_get(void *data) { - char c; - callback_data_t *stream = data; - - if (stream->pos >= stream->len) { - stream->pos = 0; - stream->len = stream->callback(stream->data, MAX_BUF_LEN, stream->arg); - if (stream->len == 0 || stream->len == (size_t)-1) - return EOF; - } - - c = stream->data[stream->pos]; - stream->pos++; - return (unsigned char)c; -} - -json_t *json_load_callback(json_load_callback_t callback, void *arg, size_t flags, - json_error_t *error) { - lex_t lex; - json_t *result; - - callback_data_t stream_data; - - memset(&stream_data, 0, sizeof(stream_data)); - stream_data.callback = callback; - stream_data.arg = arg; - - 
jsonp_error_init(error, ""); - - if (callback == NULL) { - error_set(error, NULL, json_error_invalid_argument, "wrong arguments"); - return NULL; - } - - if (lex_init(&lex, (get_func)callback_get, flags, &stream_data)) - return NULL; - - result = parse_json(&lex, flags, error); - - lex_close(&lex); - return result; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/lookup3.h b/solo-ckpool-source/src/jansson-2.14/src/lookup3.h deleted file mode 100644 index 9b39aa1..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/lookup3.h +++ /dev/null @@ -1,382 +0,0 @@ -// clang-format off -/* -------------------------------------------------------------------------------- -lookup3.c, by Bob Jenkins, May 2006, Public Domain. - -These are functions for producing 32-bit hashes for hash table lookup. -hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final() -are externally useful functions. Routines to test the hash are included -if SELF_TEST is defined. You can use this free for any purpose. It's in -the public domain. It has no warranty. - -You probably want to use hashlittle(). hashlittle() and hashbig() -hash byte arrays. hashlittle() is is faster than hashbig() on -little-endian machines. Intel and AMD are little-endian machines. -On second thought, you probably want hashlittle2(), which is identical to -hashlittle() except it returns two 32-bit hashes for the price of one. -You could implement hashbig2() if you wanted but I haven't bothered here. - -If you want to find a hash of, say, exactly 7 integers, do - a = i1; b = i2; c = i3; - mix(a,b,c); - a += i4; b += i5; c += i6; - mix(a,b,c); - a += i7; - final(a,b,c); -then use c as the hash value. If you have a variable length array of -4-byte integers to hash, use hashword(). If you have a byte array (like -a character string), use hashlittle(). If you have several byte arrays, or -a mix of things, see the comments above hashlittle(). - -Why is this so big? 
I read 12 bytes at a time into 3 4-byte integers, -then mix those integers. This is fast (you can do a lot more thorough -mixing with 12*3 instructions on 3 integers than you can with 3 instructions -on 1 byte), but shoehorning those bytes into integers efficiently is messy. -------------------------------------------------------------------------------- -*/ - -#include - -#ifdef HAVE_CONFIG_H -#include -#endif - -#ifdef HAVE_STDINT_H -#include /* defines uint32_t etc */ -#endif - -#ifdef HAVE_SYS_PARAM_H -#include /* attempt to define endianness */ -#endif - -#ifdef HAVE_ENDIAN_H -# include /* attempt to define endianness */ -#endif - -/* - * My best guess at if you are big-endian or little-endian. This may - * need adjustment. - */ -#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \ - __BYTE_ORDER == __LITTLE_ENDIAN) || \ - (defined(i386) || defined(__i386__) || defined(__i486__) || \ - defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL)) -# define HASH_LITTLE_ENDIAN 1 -# define HASH_BIG_ENDIAN 0 -#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \ - __BYTE_ORDER == __BIG_ENDIAN) || \ - (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel)) -# define HASH_LITTLE_ENDIAN 0 -# define HASH_BIG_ENDIAN 1 -#else -# define HASH_LITTLE_ENDIAN 0 -# define HASH_BIG_ENDIAN 0 -#endif - -#define hashsize(n) ((size_t)1<<(n)) -#define hashmask(n) (hashsize(n)-1) -#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k)))) - -/* -------------------------------------------------------------------------------- -mix -- mix 3 32-bit values reversibly. - -This is reversible, so any information in (a,b,c) before mix() is -still in (a,b,c) after mix(). - -If four pairs of (a,b,c) inputs are run through mix(), or through -mix() in reverse, there are at least 32 bits of the output that -are sometimes the same for one pair and different for another pair. 
-This was tested for: -* pairs that differed by one bit, by two bits, in any combination - of top bits of (a,b,c), or in any combination of bottom bits of - (a,b,c). -* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - is commonly produced by subtraction) look like a single 1-bit - difference. -* the base values were pseudorandom, all zero but one bit set, or - all zero plus a counter that starts at zero. - -Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that -satisfy this are - 4 6 8 16 19 4 - 9 15 3 18 27 15 - 14 9 3 7 17 3 -Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing -for "differ" defined as + with a one-bit base and a two-bit delta. I -used http://burtleburtle.net/bob/hash/avalanche.html to choose -the operations, constants, and arrangements of the variables. - -This does not achieve avalanche. There are input bits of (a,b,c) -that fail to affect some output bits of (a,b,c), especially of a. The -most thoroughly mixed value is c, but it doesn't really even achieve -avalanche in c. - -This allows some parallelism. Read-after-writes are good at doubling -the number of bits affected, so the goal of mixing pulls in the opposite -direction as the goal of parallelism. I did what I could. Rotates -seem to cost as much as shifts on every machine I could lay my hands -on, and rotates are much kinder to the top and bottom bits, so I used -rotates. 
-------------------------------------------------------------------------------- -*/ -#define mix(a,b,c) \ -{ \ - a -= c; a ^= rot(c, 4); c += b; \ - b -= a; b ^= rot(a, 6); a += c; \ - c -= b; c ^= rot(b, 8); b += a; \ - a -= c; a ^= rot(c,16); c += b; \ - b -= a; b ^= rot(a,19); a += c; \ - c -= b; c ^= rot(b, 4); b += a; \ -} - -/* -------------------------------------------------------------------------------- -final -- final mixing of 3 32-bit values (a,b,c) into c - -Pairs of (a,b,c) values differing in only a few bits will usually -produce values of c that look totally different. This was tested for -* pairs that differed by one bit, by two bits, in any combination - of top bits of (a,b,c), or in any combination of bottom bits of - (a,b,c). -* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed - the output delta to a Gray code (a^(a>>1)) so a string of 1's (as - is commonly produced by subtraction) look like a single 1-bit - difference. -* the base values were pseudorandom, all zero but one bit set, or - all zero plus a counter that starts at zero. - -These constants passed: - 14 11 25 16 4 14 24 - 12 14 25 16 4 14 24 -and these came close: - 4 8 15 26 3 22 24 - 10 8 15 26 3 22 24 - 11 8 15 26 3 22 24 -------------------------------------------------------------------------------- -*/ -#define final(a,b,c) \ -{ \ - c ^= b; c -= rot(b,14); \ - a ^= c; a -= rot(c,11); \ - b ^= a; b -= rot(a,25); \ - c ^= b; c -= rot(b,16); \ - a ^= c; a -= rot(c,4); \ - b ^= a; b -= rot(a,14); \ - c ^= b; c -= rot(b,24); \ -} - -/* -------------------------------------------------------------------------------- -hashlittle() -- hash a variable-length key into a 32-bit value - k : the key (the unaligned variable-length array of bytes) - length : the length of the key, counting by bytes - initval : can be any 4-byte value -Returns a 32-bit value. Every bit of the key affects every bit of -the return value. 
Two keys differing by one or two bits will have -totally different hash values. - -The best hash table sizes are powers of 2. There is no need to do -mod a prime (mod is sooo slow!). If you need less than 32 bits, -use a bitmask. For example, if you need only 10 bits, do - h = (h & hashmask(10)); -In which case, the hash table should have hashsize(10) elements. - -If you are hashing n strings (uint8_t **)k, do it like this: - for (i=0, h=0; i 12) - { - a += k[0]; - b += k[1]; - c += k[2]; - mix(a,b,c); - length -= 12; - k += 3; - } - - /*----------------------------- handle the last (probably partial) block */ - /* - * "k[2]&0xffffff" actually reads beyond the end of the string, but - * then masks off the part it's not allowed to read. Because the - * string is aligned, the masked-off tail is in the same word as the - * rest of the string. Every machine with memory protection I've seen - * does it on word boundaries, so is OK with this. But VALGRIND will - * still catch it and complain. The masking trick does make the hash - * noticeably faster for short strings (like English words). 
- */ -#ifndef NO_MASKING_TRICK - - switch(length) - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; - case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; - case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=k[1]&0xffffff; a+=k[0]; break; - case 6 : b+=k[1]&0xffff; a+=k[0]; break; - case 5 : b+=k[1]&0xff; a+=k[0]; break; - case 4 : a+=k[0]; break; - case 3 : a+=k[0]&0xffffff; break; - case 2 : a+=k[0]&0xffff; break; - case 1 : a+=k[0]&0xff; break; - case 0 : return c; /* zero length strings require no mixing */ - } - -#else /* make valgrind happy */ - - k8 = (const uint8_t *)k; - switch(length) - { - case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; - case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ - case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ - case 9 : c+=k8[8]; /* fall through */ - case 8 : b+=k[1]; a+=k[0]; break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ - case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ - case 5 : b+=k8[4]; /* fall through */ - case 4 : a+=k[0]; break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ - case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ - case 1 : a+=k8[0]; break; - case 0 : return c; - } - -#endif /* !valgrind */ - - } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { - const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */ - const uint8_t *k8; - - /*--------------- all but last block: aligned reads and different mixing */ - while (length > 12) - { - a += k[0] + (((uint32_t)k[1])<<16); - b += k[2] + (((uint32_t)k[3])<<16); - c += k[4] + (((uint32_t)k[5])<<16); - mix(a,b,c); - length -= 12; - k += 6; - } - - /*----------------------------- handle the last (probably partial) block */ - k8 = (const uint8_t *)k; - switch(length) - { - case 12: c+=k[4]+(((uint32_t)k[5])<<16); - b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 11: 
c+=((uint32_t)k8[10])<<16; /* fall through */ - case 10: c+=k[4]; - b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 9 : c+=k8[8]; /* fall through */ - case 8 : b+=k[2]+(((uint32_t)k[3])<<16); - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ - case 6 : b+=k[2]; - a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 5 : b+=k8[4]; /* fall through */ - case 4 : a+=k[0]+(((uint32_t)k[1])<<16); - break; - case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ - case 2 : a+=k[0]; - break; - case 1 : a+=k8[0]; - break; - case 0 : return c; /* zero length requires no mixing */ - } - - } else { /* need to read the key one byte at a time */ - const uint8_t *k = (const uint8_t *)key; - - /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ - while (length > 12) - { - a += k[0]; - a += ((uint32_t)k[1])<<8; - a += ((uint32_t)k[2])<<16; - a += ((uint32_t)k[3])<<24; - b += k[4]; - b += ((uint32_t)k[5])<<8; - b += ((uint32_t)k[6])<<16; - b += ((uint32_t)k[7])<<24; - c += k[8]; - c += ((uint32_t)k[9])<<8; - c += ((uint32_t)k[10])<<16; - c += ((uint32_t)k[11])<<24; - mix(a,b,c); - length -= 12; - k += 12; - } - - /*-------------------------------- last block: affect all 32 bits of (c) */ - switch(length) /* all the case statements fall through */ - { - case 12: c+=((uint32_t)k[11])<<24; /* fall through */ - case 11: c+=((uint32_t)k[10])<<16; /* fall through */ - case 10: c+=((uint32_t)k[9])<<8; /* fall through */ - case 9 : c+=k[8]; /* fall through */ - case 8 : b+=((uint32_t)k[7])<<24; /* fall through */ - case 7 : b+=((uint32_t)k[6])<<16; /* fall through */ - case 6 : b+=((uint32_t)k[5])<<8; /* fall through */ - case 5 : b+=k[4]; /* fall through */ - case 4 : a+=((uint32_t)k[3])<<24; /* fall through */ - case 3 : a+=((uint32_t)k[2])<<16; /* fall through */ - case 2 : a+=((uint32_t)k[1])<<8; /* fall through */ - case 1 : a+=k[0]; - break; - case 0 : return c; - } - } - - 
final(a,b,c); - return c; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/memory.c b/solo-ckpool-source/src/jansson-2.14/src/memory.c deleted file mode 100644 index a912007..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/memory.c +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2011-2012 Basile Starynkevitch - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify it - * under the terms of the MIT license. See LICENSE for details. - */ - -#include -#include - -#include "jansson.h" -#include "jansson_private.h" - -/* C89 allows these to be macros */ -#undef malloc -#undef free - -/* memory function pointers */ -static json_malloc_t do_malloc = malloc; -static json_free_t do_free = free; - -void *jsonp_malloc(size_t size) { - if (!size) - return NULL; - - return (*do_malloc)(size); -} - -void _jsonp_free(void **ptr) { - if (!*ptr) - return; - - (*do_free)(*ptr); - *ptr = NULL; -} - -char *jsonp_strdup(const char *str) { return jsonp_strndup(str, strlen(str)); } - -char *jsonp_strndup(const char *str, size_t len) { - char *new_str; - - new_str = jsonp_malloc(len + 1); - if (!new_str) - return NULL; - - memcpy(new_str, str, len); - new_str[len] = '\0'; - return new_str; -} - -char *jsonp_strsteal(strbuffer_t *strbuff) -{ - size_t len = strbuff->length + 1; - char *ret = realloc(strbuff->value, len); - - return ret; -} - -char *jsonp_eolstrsteal(strbuffer_t *strbuff) -{ - size_t len = strbuff->length + 2; - char *ret = realloc(strbuff->value, len); - - ret[strbuff->length] = '\n'; - ret[strbuff->length + 1] = '\0'; - return ret; -} - -void json_set_alloc_funcs(json_malloc_t malloc_fn, json_free_t free_fn) { - do_malloc = malloc_fn; - do_free = free_fn; -} - -void json_get_alloc_funcs(json_malloc_t *malloc_fn, json_free_t *free_fn) { - if (malloc_fn) - *malloc_fn = do_malloc; - if (free_fn) - *free_fn = do_free; -} diff --git 
a/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c b/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c deleted file mode 100644 index 04c116e..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/pack_unpack.c +++ /dev/null @@ -1,937 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2011-2012 Graeme Smecher - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#include "jansson.h" -#include "jansson_private.h" -#include "utf.h" -#include - -typedef struct { - int line; - int column; - size_t pos; - char token; -} token_t; - -typedef struct { - const char *start; - const char *fmt; - token_t prev_token; - token_t token; - token_t next_token; - json_error_t *error; - size_t flags; - int line; - int column; - size_t pos; - int has_error; -} scanner_t; - -#define token(scanner) ((scanner)->token.token) - -static const char *const type_names[] = {"object", "array", "string", "integer", - "real", "true", "false", "null"}; - -#define type_name(x) type_names[json_typeof(x)] - -static const char unpack_value_starters[] = "{[siIbfFOon"; - -static void scanner_init(scanner_t *s, json_error_t *error, size_t flags, - const char *fmt) { - s->error = error; - s->flags = flags; - s->fmt = s->start = fmt; - memset(&s->prev_token, 0, sizeof(token_t)); - memset(&s->token, 0, sizeof(token_t)); - memset(&s->next_token, 0, sizeof(token_t)); - s->line = 1; - s->column = 0; - s->pos = 0; - s->has_error = 0; -} - -static void next_token(scanner_t *s) { - const char *t; - s->prev_token = s->token; - - if (s->next_token.line) { - s->token = s->next_token; - s->next_token.line = 0; - return; - } - - if (!token(s) && !*s->fmt) - return; - - t = s->fmt; - s->column++; - s->pos++; - - /* skip space and ignored chars */ - while (*t == ' ' || *t == '\t' || *t == '\n' || *t == ',' || *t == ':') { - if (*t == '\n') { - s->line++; - s->column = 1; - } else - s->column++; - - 
s->pos++; - t++; - } - - s->token.token = *t; - s->token.line = s->line; - s->token.column = s->column; - s->token.pos = s->pos; - - if (*t) - t++; - s->fmt = t; -} - -static void prev_token(scanner_t *s) { - s->next_token = s->token; - s->token = s->prev_token; -} - -static void set_error(scanner_t *s, const char *source, enum json_error_code code, - const char *fmt, ...) { - va_list ap; - va_start(ap, fmt); - - jsonp_error_vset(s->error, s->token.line, s->token.column, s->token.pos, code, fmt, - ap); - - jsonp_error_set_source(s->error, source); - - va_end(ap); -} - -static json_t *pack(scanner_t *s, va_list *ap); - -/* ours will be set to 1 if jsonp_free() must be called for the result - afterwards */ -static char *read_string(scanner_t *s, va_list *ap, const char *purpose, size_t *out_len, - int *ours, int optional) { - char t; - strbuffer_t strbuff; - const char *str; - size_t length; - - next_token(s); - t = token(s); - prev_token(s); - - *ours = 0; - if (t != '#' && t != '%' && t != '+') { - /* Optimize the simple case */ - str = va_arg(*ap, const char *); - - if (!str) { - if (!optional) { - set_error(s, "", json_error_null_value, "NULL %s", purpose); - s->has_error = 1; - } - return NULL; - } - - length = strlen(str); - - if (!utf8_check_string(str, length)) { - set_error(s, "", json_error_invalid_utf8, "Invalid UTF-8 %s", purpose); - s->has_error = 1; - return NULL; - } - - *out_len = length; - return (char *)str; - } else if (optional) { - set_error(s, "", json_error_invalid_format, - "Cannot use '%c' on optional strings", t); - s->has_error = 1; - - return NULL; - } - - if (strbuffer_init(&strbuff)) { - set_error(s, "", json_error_out_of_memory, "Out of memory"); - s->has_error = 1; - } - - while (1) { - str = va_arg(*ap, const char *); - if (!str) { - set_error(s, "", json_error_null_value, "NULL %s", purpose); - s->has_error = 1; - } - - next_token(s); - - if (token(s) == '#') { - length = va_arg(*ap, int); - } else if (token(s) == '%') { - length = 
va_arg(*ap, size_t); - } else { - prev_token(s); - length = s->has_error ? 0 : strlen(str); - } - - if (!s->has_error && strbuffer_append_bytes(&strbuff, str, length) == -1) { - set_error(s, "", json_error_out_of_memory, "Out of memory"); - s->has_error = 1; - } - - next_token(s); - if (token(s) != '+') { - prev_token(s); - break; - } - } - - if (s->has_error) { - strbuffer_close(&strbuff); - return NULL; - } - - if (!utf8_check_string(strbuff.value, strbuff.length)) { - set_error(s, "", json_error_invalid_utf8, "Invalid UTF-8 %s", purpose); - strbuffer_close(&strbuff); - s->has_error = 1; - return NULL; - } - - *out_len = strbuff.length; - *ours = 1; - return strbuffer_steal_value(&strbuff); -} - -static json_t *pack_object(scanner_t *s, va_list *ap) { - json_t *object = json_object(); - next_token(s); - - while (token(s) != '}') { - char *key; - size_t len; - int ours; - json_t *value; - char valueOptional; - - if (!token(s)) { - set_error(s, "", json_error_invalid_format, - "Unexpected end of format string"); - goto error; - } - - if (token(s) != 's') { - set_error(s, "", json_error_invalid_format, - "Expected format 's', got '%c'", token(s)); - goto error; - } - - key = read_string(s, ap, "object key", &len, &ours, 0); - - next_token(s); - - next_token(s); - valueOptional = token(s); - prev_token(s); - - value = pack(s, ap); - if (!value) { - if (ours) - jsonp_free(key); - - if (valueOptional != '*') { - set_error(s, "", json_error_null_value, "NULL object value"); - s->has_error = 1; - } - - next_token(s); - continue; - } - - if (s->has_error) - json_decref(value); - - if (!s->has_error && json_object_set_new_nocheck(object, key, value)) { - set_error(s, "", json_error_out_of_memory, - "Unable to add key \"%s\"", key); - s->has_error = 1; - } - - if (ours) - jsonp_free(key); - - next_token(s); - } - - if (!s->has_error) - return object; - -error: - json_decref(object); - return NULL; -} - -static json_t *pack_array(scanner_t *s, va_list *ap) { - json_t *array 
= json_array(); - next_token(s); - - while (token(s) != ']') { - json_t *value; - char valueOptional; - - if (!token(s)) { - set_error(s, "", json_error_invalid_format, - "Unexpected end of format string"); - /* Format string errors are unrecoverable. */ - goto error; - } - - next_token(s); - valueOptional = token(s); - prev_token(s); - - value = pack(s, ap); - if (!value) { - if (valueOptional != '*') { - s->has_error = 1; - } - - next_token(s); - continue; - } - - if (s->has_error) - json_decref(value); - - if (!s->has_error && json_array_append_new(array, value)) { - set_error(s, "", json_error_out_of_memory, - "Unable to append to array"); - s->has_error = 1; - } - - next_token(s); - } - - if (!s->has_error) - return array; - -error: - json_decref(array); - return NULL; -} - -static json_t *pack_string(scanner_t *s, va_list *ap) { - char *str; - char t; - size_t len; - int ours; - int optional; - - next_token(s); - t = token(s); - optional = t == '?' || t == '*'; - if (!optional) - prev_token(s); - - str = read_string(s, ap, "string", &len, &ours, optional); - - if (!str) - return t == '?' && !s->has_error ? json_null() : NULL; - - if (s->has_error) { - /* It's impossible to reach this point if ours != 0, do not free str. */ - return NULL; - } - - if (ours) - return jsonp_stringn_nocheck_own(str, len); - - return json_stringn_nocheck(str, len); -} - -static json_t *pack_object_inter(scanner_t *s, va_list *ap, int need_incref) { - json_t *json; - char ntoken; - - next_token(s); - ntoken = token(s); - - if (ntoken != '?' && ntoken != '*') - prev_token(s); - - json = va_arg(*ap, json_t *); - - if (json) - return need_incref ? 
json_incref(json) : json; - - switch (ntoken) { - case '?': - return json_null(); - case '*': - return NULL; - default: - break; - } - - set_error(s, "", json_error_null_value, "NULL object"); - s->has_error = 1; - return NULL; -} - -static json_t *pack_integer(scanner_t *s, json_int_t value) { - json_t *json = json_integer(value); - - if (!json) { - set_error(s, "", json_error_out_of_memory, "Out of memory"); - s->has_error = 1; - } - - return json; -} - -static json_t *pack_real(scanner_t *s, double value) { - /* Allocate without setting value so we can identify OOM error. */ - json_t *json = json_real(0.0); - - if (!json) { - set_error(s, "", json_error_out_of_memory, "Out of memory"); - s->has_error = 1; - - return NULL; - } - - if (json_real_set(json, value)) { - json_decref(json); - - set_error(s, "", json_error_numeric_overflow, - "Invalid floating point value"); - s->has_error = 1; - - return NULL; - } - - return json; -} - -static json_t *pack(scanner_t *s, va_list *ap) { - switch (token(s)) { - case '{': - return pack_object(s, ap); - - case '[': - return pack_array(s, ap); - - case 's': /* string */ - return pack_string(s, ap); - - case 'n': /* null */ - return json_null(); - - case 'b': /* boolean */ - return va_arg(*ap, int) ? 
json_true() : json_false(); - - case 'i': /* integer from int */ - return pack_integer(s, va_arg(*ap, int)); - - case 'I': /* integer from json_int_t */ - return pack_integer(s, va_arg(*ap, json_int_t)); - - case 'f': /* real */ - return pack_real(s, va_arg(*ap, double)); - - case 'O': /* a json_t object; increments refcount */ - return pack_object_inter(s, ap, 1); - - case 'o': /* a json_t object; doesn't increment refcount */ - return pack_object_inter(s, ap, 0); - - default: - set_error(s, "", json_error_invalid_format, - "Unexpected format character '%c'", token(s)); - s->has_error = 1; - return NULL; - } -} - -static int unpack(scanner_t *s, json_t *root, va_list *ap); - -static int unpack_object(scanner_t *s, json_t *root, va_list *ap) { - int ret = -1; - int strict = 0; - int gotopt = 0; - - /* Use a set (emulated by a hashtable) to check that all object - keys are accessed. Checking that the correct number of keys - were accessed is not enough, as the same key can be unpacked - multiple times. - */ - hashtable_t key_set; - - if (hashtable_init(&key_set)) { - set_error(s, "", json_error_out_of_memory, "Out of memory"); - return -1; - } - - if (root && !json_is_object(root)) { - set_error(s, "", json_error_wrong_type, "Expected object, got %s", - type_name(root)); - goto out; - } - next_token(s); - - while (token(s) != '}') { - const char *key; - json_t *value; - int opt = 0; - - if (strict != 0) { - set_error(s, "", json_error_invalid_format, - "Expected '}' after '%c', got '%c'", (strict == 1 ? '!' : '*'), - token(s)); - goto out; - } - - if (!token(s)) { - set_error(s, "", json_error_invalid_format, - "Unexpected end of format string"); - goto out; - } - - if (token(s) == '!' || token(s) == '*') { - strict = (token(s) == '!' ? 
1 : -1); - next_token(s); - continue; - } - - if (token(s) != 's') { - set_error(s, "", json_error_invalid_format, - "Expected format 's', got '%c'", token(s)); - goto out; - } - - key = va_arg(*ap, const char *); - if (!key) { - set_error(s, "", json_error_null_value, "NULL object key"); - goto out; - } - - next_token(s); - - if (token(s) == '?') { - opt = gotopt = 1; - next_token(s); - } - - if (!root) { - /* skipping */ - value = NULL; - } else { - value = json_object_get(root, key); - if (!value && !opt) { - set_error(s, "", json_error_item_not_found, - "Object item not found: %s", key); - goto out; - } - } - - if (unpack(s, value, ap)) - goto out; - - hashtable_set(&key_set, key, strlen(key), json_null()); - next_token(s); - } - - if (strict == 0 && (s->flags & JSON_STRICT)) - strict = 1; - - if (root && strict == 1) { - /* We need to check that all non optional items have been parsed */ - const char *key; - size_t key_len; - /* keys_res is 1 for uninitialized, 0 for success, -1 for error. */ - int keys_res = 1; - strbuffer_t unrecognized_keys; - json_t *value; - long unpacked = 0; - - if (gotopt || json_object_size(root) != key_set.size) { - json_object_foreach(root, key, value) { - key_len = strlen(key); - if (!hashtable_get(&key_set, key, key_len)) { - unpacked++; - - /* Save unrecognized keys for the error message */ - if (keys_res == 1) { - keys_res = strbuffer_init(&unrecognized_keys); - } else if (!keys_res) { - keys_res = strbuffer_append_bytes(&unrecognized_keys, ", ", 2); - } - - if (!keys_res) - keys_res = - strbuffer_append_bytes(&unrecognized_keys, key, key_len); - } - } - } - if (unpacked) { - set_error(s, "", json_error_end_of_input_expected, - "%li object item(s) left unpacked: %s", unpacked, - keys_res ? 
"" : strbuffer_value(&unrecognized_keys)); - strbuffer_close(&unrecognized_keys); - goto out; - } - } - - ret = 0; - -out: - hashtable_close(&key_set); - return ret; -} - -static int unpack_array(scanner_t *s, json_t *root, va_list *ap) { - size_t i = 0; - int strict = 0; - - if (root && !json_is_array(root)) { - set_error(s, "", json_error_wrong_type, "Expected array, got %s", - type_name(root)); - return -1; - } - next_token(s); - - while (token(s) != ']') { - json_t *value; - - if (strict != 0) { - set_error(s, "", json_error_invalid_format, - "Expected ']' after '%c', got '%c'", (strict == 1 ? '!' : '*'), - token(s)); - return -1; - } - - if (!token(s)) { - set_error(s, "", json_error_invalid_format, - "Unexpected end of format string"); - return -1; - } - - if (token(s) == '!' || token(s) == '*') { - strict = (token(s) == '!' ? 1 : -1); - next_token(s); - continue; - } - - if (!strchr(unpack_value_starters, token(s))) { - set_error(s, "", json_error_invalid_format, - "Unexpected format character '%c'", token(s)); - return -1; - } - - if (!root) { - /* skipping */ - value = NULL; - } else { - value = json_array_get(root, i); - if (!value) { - set_error(s, "", json_error_index_out_of_range, - "Array index %lu out of range", (unsigned long)i); - return -1; - } - } - - if (unpack(s, value, ap)) - return -1; - - next_token(s); - i++; - } - - if (strict == 0 && (s->flags & JSON_STRICT)) - strict = 1; - - if (root && strict == 1 && i != json_array_size(root)) { - long diff = (long)json_array_size(root) - (long)i; - set_error(s, "", json_error_end_of_input_expected, - "%li array item(s) left unpacked", diff); - return -1; - } - - return 0; -} - -static int unpack(scanner_t *s, json_t *root, va_list *ap) { - switch (token(s)) { - case '{': - return unpack_object(s, root, ap); - - case '[': - return unpack_array(s, root, ap); - - case 's': - if (root && !json_is_string(root)) { - set_error(s, "", json_error_wrong_type, - "Expected string, got %s", type_name(root)); - 
return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - const char **str_target; - size_t *len_target = NULL; - - str_target = va_arg(*ap, const char **); - if (!str_target) { - set_error(s, "", json_error_null_value, "NULL string argument"); - return -1; - } - - next_token(s); - - if (token(s) == '%') { - len_target = va_arg(*ap, size_t *); - if (!len_target) { - set_error(s, "", json_error_null_value, - "NULL string length argument"); - return -1; - } - } else - prev_token(s); - - if (root) { - *str_target = json_string_value(root); - if (len_target) - *len_target = json_string_length(root); - } - } - return 0; - - case 'i': - if (root && !json_is_integer(root)) { - set_error(s, "", json_error_wrong_type, - "Expected integer, got %s", type_name(root)); - return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - int *target = va_arg(*ap, int *); - if (root) - *target = (int)json_integer_value(root); - } - - return 0; - - case 'I': - if (root && !json_is_integer(root)) { - set_error(s, "", json_error_wrong_type, - "Expected integer, got %s", type_name(root)); - return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - json_int_t *target = va_arg(*ap, json_int_t *); - if (root) - *target = json_integer_value(root); - } - - return 0; - - case 'b': - if (root && !json_is_boolean(root)) { - set_error(s, "", json_error_wrong_type, - "Expected true or false, got %s", type_name(root)); - return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - int *target = va_arg(*ap, int *); - if (root) - *target = json_is_true(root); - } - - return 0; - - case 'f': - if (root && !json_is_real(root)) { - set_error(s, "", json_error_wrong_type, - "Expected real, got %s", type_name(root)); - return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - double *target = va_arg(*ap, double *); - if (root) - *target = json_real_value(root); - } - - return 0; - - case 'F': - if (root && !json_is_number(root)) { - set_error(s, "", json_error_wrong_type, - "Expected real or 
integer, got %s", type_name(root)); - return -1; - } - - if (!(s->flags & JSON_VALIDATE_ONLY)) { - double *target = va_arg(*ap, double *); - if (root) - *target = json_number_value(root); - } - - return 0; - - case 'O': - if (root && !(s->flags & JSON_VALIDATE_ONLY)) - json_incref(root); - /* Fall through */ - - case 'o': - if (!(s->flags & JSON_VALIDATE_ONLY)) { - json_t **target = va_arg(*ap, json_t **); - if (root) - *target = root; - } - - return 0; - - case 'n': - /* Never assign, just validate */ - if (root && !json_is_null(root)) { - set_error(s, "", json_error_wrong_type, - "Expected null, got %s", type_name(root)); - return -1; - } - return 0; - - default: - set_error(s, "", json_error_invalid_format, - "Unexpected format character '%c'", token(s)); - return -1; - } -} - -json_t *json_vpack_ex(json_error_t *error, size_t flags, const char *fmt, va_list ap) { - scanner_t s; - va_list ap_copy; - json_t *value; - - if (!fmt || !*fmt) { - jsonp_error_init(error, ""); - jsonp_error_set(error, -1, -1, 0, json_error_invalid_argument, - "NULL or empty format string"); - return NULL; - } - jsonp_error_init(error, NULL); - - scanner_init(&s, error, flags, fmt); - next_token(&s); - - va_copy(ap_copy, ap); - value = pack(&s, &ap_copy); - va_end(ap_copy); - - /* This will cover all situations where s.has_error is true */ - if (!value) - return NULL; - - next_token(&s); - if (token(&s)) { - json_decref(value); - set_error(&s, "", json_error_invalid_format, - "Garbage after format string"); - return NULL; - } - - return value; -} - -json_t *json_pack_ex(json_error_t *error, size_t flags, const char *fmt, ...) { - json_t *value; - va_list ap; - - va_start(ap, fmt); - value = json_vpack_ex(error, flags, fmt, ap); - va_end(ap); - - return value; -} - -json_t *json_pack(const char *fmt, ...) 
{ - json_t *value; - va_list ap; - - va_start(ap, fmt); - value = json_vpack_ex(NULL, 0, fmt, ap); - va_end(ap); - - return value; -} - -int json_vunpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, - va_list ap) { - scanner_t s; - va_list ap_copy; - - if (!root) { - jsonp_error_init(error, ""); - jsonp_error_set(error, -1, -1, 0, json_error_null_value, "NULL root value"); - return -1; - } - - if (!fmt || !*fmt) { - jsonp_error_init(error, ""); - jsonp_error_set(error, -1, -1, 0, json_error_invalid_argument, - "NULL or empty format string"); - return -1; - } - jsonp_error_init(error, NULL); - - scanner_init(&s, error, flags, fmt); - next_token(&s); - - va_copy(ap_copy, ap); - if (unpack(&s, root, &ap_copy)) { - va_end(ap_copy); - return -1; - } - va_end(ap_copy); - - next_token(&s); - if (token(&s)) { - set_error(&s, "", json_error_invalid_format, - "Garbage after format string"); - return -1; - } - - return 0; -} - -int json_unpack_ex(json_t *root, json_error_t *error, size_t flags, const char *fmt, - ...) { - int ret; - va_list ap; - - va_start(ap, fmt); - ret = json_vunpack_ex(root, error, flags, fmt, ap); - va_end(ap); - - return ret; -} - -int json_unpack(json_t *root, const char *fmt, ...) { - int ret; - va_list ap; - - va_start(ap, fmt); - ret = json_vunpack_ex(root, NULL, 0, fmt, ap); - va_end(ap); - - return ret; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c deleted file mode 100644 index 733da31..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.c +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#include "strbuffer.h" -#include "jansson_private.h" -#include -#include -#include - -#define STRBUFFER_MIN_SIZE 4096 -#define STRBUFFER_FACTOR 2 -#define STRBUFFER_SIZE_MAX ((size_t)-1) - -int strbuffer_init(strbuffer_t *strbuff) { - strbuff->size = STRBUFFER_MIN_SIZE; - strbuff->length = 0; - - strbuff->value = jsonp_malloc(strbuff->size); - if (!strbuff->value) - return -1; - - /* initialize to empty */ - strbuff->value[0] = '\0'; - return 0; -} - -void strbuffer_close(strbuffer_t *strbuff) { - if (strbuff->value) - jsonp_free(strbuff->value); - - strbuff->size = 0; - strbuff->length = 0; - strbuff->value = NULL; -} - -void strbuffer_clear(strbuffer_t *strbuff) { - strbuff->length = 0; - strbuff->value[0] = '\0'; -} - -const char *strbuffer_value(const strbuffer_t *strbuff) { return strbuff->value; } - -char *strbuffer_steal_value(strbuffer_t *strbuff) { - char *result = strbuff->value; - strbuff->value = NULL; - return result; -} - -int strbuffer_append_byte(strbuffer_t *strbuff, char byte) { - return strbuffer_append_bytes(strbuff, &byte, 1); -} - -int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size) { - /* Leave room for EOL and NULL bytes */ - if(size + 2 > strbuff->size - strbuff->length) { - int backoff = 1; - size_t new_size; - char *new_value; - - /* avoid integer overflow */ - if (strbuff->size > STRBUFFER_SIZE_MAX / STRBUFFER_FACTOR || - size > STRBUFFER_SIZE_MAX - 1 || - strbuff->length > STRBUFFER_SIZE_MAX - 1 - size) - return -1; - - new_size = max(strbuff->size * STRBUFFER_FACTOR, strbuff->length + size + 1); - - while (42) { - new_value = realloc(strbuff->value, new_size); - if (new_value) - break; - usleep(backoff * 1000); - backoff <<= 1; - } - - strbuff->value = new_value; - strbuff->size = new_size; - } - - memcpy(strbuff->value + strbuff->length, data, size); - strbuff->length += size; - strbuff->value[strbuff->length] = '\0'; - - return 0; -} - -char 
strbuffer_pop(strbuffer_t *strbuff) { - if (strbuff->length > 0) { - char c = strbuff->value[--strbuff->length]; - strbuff->value[strbuff->length] = '\0'; - return c; - } else - return '\0'; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h b/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h deleted file mode 100644 index 70f2646..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/strbuffer.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#ifndef STRBUFFER_H -#define STRBUFFER_H - -#include "jansson.h" -#include - -typedef struct { - char *value; - size_t length; /* bytes used */ - size_t size; /* bytes allocated */ -} strbuffer_t; - -int strbuffer_init(strbuffer_t *strbuff) JANSSON_ATTRS((warn_unused_result)); -void strbuffer_close(strbuffer_t *strbuff); - -void strbuffer_clear(strbuffer_t *strbuff); - -const char *strbuffer_value(const strbuffer_t *strbuff); - -/* Steal the value and close the strbuffer */ -char *strbuffer_steal_value(strbuffer_t *strbuff); - -int strbuffer_append_byte(strbuffer_t *strbuff, char byte); -int strbuffer_append_bytes(strbuffer_t *strbuff, const char *data, size_t size); - -char strbuffer_pop(strbuffer_t *strbuff); - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/strconv.c b/solo-ckpool-source/src/jansson-2.14/src/strconv.c deleted file mode 100644 index c6f4fd1..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/strconv.c +++ /dev/null @@ -1,132 +0,0 @@ -#include "jansson_private.h" -#include "strbuffer.h" -#include -#include -#include -#include -#include - -/* need jansson_private_config.h to get the correct snprintf */ -#ifdef HAVE_CONFIG_H -#include -#endif - -#if JSON_HAVE_LOCALECONV -#include - -/* - - This code assumes that the decimal separator is exactly one - character. 
- - - If setlocale() is called by another thread between the call to - localeconv() and the call to sprintf() or strtod(), the result may - be wrong. setlocale() is not thread-safe and should not be used - this way. Multi-threaded programs should use uselocale() instead. -*/ - -static void to_locale(strbuffer_t *strbuffer) { - const char *point; - char *pos; - - point = localeconv()->decimal_point; - if (*point == '.') { - /* No conversion needed */ - return; - } - - pos = strchr(strbuffer->value, '.'); - if (pos) - *pos = *point; -} - -static void from_locale(char *buffer) { - const char *point; - char *pos; - - point = localeconv()->decimal_point; - if (*point == '.') { - /* No conversion needed */ - return; - } - - pos = strchr(buffer, *point); - if (pos) - *pos = '.'; -} -#endif - -int jsonp_strtod(strbuffer_t *strbuffer, double *out) { - double value; - char *end; - -#if JSON_HAVE_LOCALECONV - to_locale(strbuffer); -#endif - - errno = 0; - value = strtod(strbuffer->value, &end); - assert(end == strbuffer->value + strbuffer->length); - - if ((value == HUGE_VAL || value == -HUGE_VAL) && errno == ERANGE) { - /* Overflow */ - return -1; - } - - *out = value; - return 0; -} - -int jsonp_dtostr(char *buffer, size_t size, double value, int precision) { - int ret; - char *start, *end; - size_t length; - - if (precision == 0) - precision = 17; - - ret = snprintf(buffer, size, "%.*g", precision, value); - if (ret < 0) - return -1; - - length = (size_t)ret; - if (length >= size) - return -1; - -#if JSON_HAVE_LOCALECONV - from_locale(buffer); -#endif - - /* Make sure there's a dot or 'e' in the output. Otherwise - a real is converted to an integer when decoding */ - if (strchr(buffer, '.') == NULL && strchr(buffer, 'e') == NULL) { - if (length + 3 >= size) { - /* No space to append ".0" */ - return -1; - } - buffer[length] = '.'; - buffer[length + 1] = '0'; - buffer[length + 2] = '\0'; - length += 2; - } - - /* Remove leading '+' from positive exponent. 
Also remove leading - zeros from exponents (added by some printf() implementations) */ - start = strchr(buffer, 'e'); - if (start) { - start++; - end = start + 1; - - if (*start == '-') - start++; - - while (*end == '0') - end++; - - if (end != start) { - memmove(start, end, length - (size_t)(end - buffer)); - length -= (size_t)(end - start); - } - } - - return (int)length; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/utf.c b/solo-ckpool-source/src/jansson-2.14/src/utf.c deleted file mode 100644 index 135a3f3..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/utf.c +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#include "utf.h" -#include - -int utf8_encode(int32_t codepoint, char *buffer, size_t *size) { - if (codepoint < 0) - return -1; - else if (codepoint < 0x80) { - buffer[0] = (char)codepoint; - *size = 1; - } else if (codepoint < 0x800) { - buffer[0] = 0xC0 + ((codepoint & 0x7C0) >> 6); - buffer[1] = 0x80 + ((codepoint & 0x03F)); - *size = 2; - } else if (codepoint < 0x10000) { - buffer[0] = 0xE0 + ((codepoint & 0xF000) >> 12); - buffer[1] = 0x80 + ((codepoint & 0x0FC0) >> 6); - buffer[2] = 0x80 + ((codepoint & 0x003F)); - *size = 3; - } else if (codepoint <= 0x10FFFF) { - buffer[0] = 0xF0 + ((codepoint & 0x1C0000) >> 18); - buffer[1] = 0x80 + ((codepoint & 0x03F000) >> 12); - buffer[2] = 0x80 + ((codepoint & 0x000FC0) >> 6); - buffer[3] = 0x80 + ((codepoint & 0x00003F)); - *size = 4; - } else - return -1; - - return 0; -} - -size_t utf8_check_first(char byte) { - unsigned char u = (unsigned char)byte; - - if (u < 0x80) - return 1; - - if (0x80 <= u && u <= 0xBF) { - /* second, third or fourth byte of a multi-byte - sequence, i.e. 
a "continuation byte" */ - return 0; - } else if (u == 0xC0 || u == 0xC1) { - /* overlong encoding of an ASCII byte */ - return 0; - } else if (0xC2 <= u && u <= 0xDF) { - /* 2-byte sequence */ - return 2; - } - - else if (0xE0 <= u && u <= 0xEF) { - /* 3-byte sequence */ - return 3; - } else if (0xF0 <= u && u <= 0xF4) { - /* 4-byte sequence */ - return 4; - } else { /* u >= 0xF5 */ - /* Restricted (start of 4-, 5- or 6-byte sequence) or invalid - UTF-8 */ - return 0; - } -} - -size_t utf8_check_full(const char *buffer, size_t size, int32_t *codepoint) { - size_t i; - int32_t value = 0; - unsigned char u = (unsigned char)buffer[0]; - - if (size == 2) { - value = u & 0x1F; - } else if (size == 3) { - value = u & 0xF; - } else if (size == 4) { - value = u & 0x7; - } else - return 0; - - for (i = 1; i < size; i++) { - u = (unsigned char)buffer[i]; - - if (u < 0x80 || u > 0xBF) { - /* not a continuation byte */ - return 0; - } - - value = (value << 6) + (u & 0x3F); - } - - if (value > 0x10FFFF) { - /* not in Unicode range */ - return 0; - } - - else if (0xD800 <= value && value <= 0xDFFF) { - /* invalid code point (UTF-16 surrogate halves) */ - return 0; - } - - else if ((size == 2 && value < 0x80) || (size == 3 && value < 0x800) || - (size == 4 && value < 0x10000)) { - /* overlong encoding */ - return 0; - } - - if (codepoint) - *codepoint = value; - - return 1; -} - -const char *utf8_iterate(const char *buffer, size_t bufsize, int32_t *codepoint, int noutf8) -{ - size_t count = 1; - int32_t value; - - if (!bufsize) - return buffer; - - if (!noutf8) { - count = utf8_check_first(buffer[0]); - if(count <= 0) - return NULL; - } - - if (count == 1) - value = (unsigned char)buffer[0]; - else { - if (count > bufsize || !utf8_check_full(buffer, count, &value)) - return NULL; - } - - if (codepoint) - *codepoint = value; - - return buffer + count; -} - -int utf8_check_string(const char *string, size_t length) { - size_t i; - - for (i = 0; i < length; i++) { - size_t count = 
utf8_check_first(string[i]); - if (count == 0) - return 0; - else if (count > 1) { - if (count > length - i) - return 0; - - if (!utf8_check_full(&string[i], count, NULL)) - return 0; - - i += count - 1; - } - } - - return 1; -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/utf.h b/solo-ckpool-source/src/jansson-2.14/src/utf.h deleted file mode 100644 index a5c46cb..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/utf.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * Copyright (c) 2015,2017,2023 Con Kolivas - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. - */ - -#ifndef UTF_H -#define UTF_H - -#ifdef HAVE_CONFIG_H -#include -#endif - -#include -#ifdef HAVE_STDINT_H -#include -#endif - -int utf8_encode(int32_t codepoint, char *buffer, size_t *size); - -size_t utf8_check_first(char byte); -size_t utf8_check_full(const char *buffer, size_t size, int32_t *codepoint); -const char *utf8_iterate(const char *buffer, size_t size, int32_t *codepoint, int noutf8); - -int utf8_check_string(const char *string, size_t length); - -#endif diff --git a/solo-ckpool-source/src/jansson-2.14/src/value.c b/solo-ckpool-source/src/jansson-2.14/src/value.c deleted file mode 100644 index 07af087..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/value.c +++ /dev/null @@ -1,1112 +0,0 @@ -/* - * Copyright (c) 2009-2016 Petri Lehtinen - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#ifdef HAVE_CONFIG_H -#include -#endif - -#include -#include -#include -#include - -#ifdef HAVE_STDINT_H -#include -#endif - -#include "hashtable.h" -#include "jansson.h" -#include "jansson_private.h" -#include "utf.h" - -/* Work around nonstandard isnan() and isinf() implementations */ -#ifndef isnan -#ifndef __sun -static JSON_INLINE int isnan(double x) { return x != x; } -#endif -#endif -#ifndef isinf -static JSON_INLINE int isinf(double x) { return !isnan(x) && isnan(x - x); } -#endif - -json_t *do_deep_copy(const json_t *json, hashtable_t *parents); - -static JSON_INLINE void json_init(json_t *json, json_type type) { - json->type = type; - json->refcount = 1; -} - -int jsonp_loop_check(hashtable_t *parents, const json_t *json, char *key, size_t key_size, - size_t *key_len_out) { - size_t key_len = snprintf(key, key_size, "%p", json); - - if (key_len_out) - *key_len_out = key_len; - - if (hashtable_get(parents, key, key_len)) - return -1; - - return hashtable_set(parents, key, key_len, json_null()); -} - -/*** object ***/ - -extern volatile uint32_t hashtable_seed; - -json_t *json_object(void) { - json_object_t *object = jsonp_malloc(sizeof(json_object_t)); - if (!object) - return NULL; - - if (!hashtable_seed) { - /* Autoseed */ - json_object_seed(0); - } - - json_init(&object->json, JSON_OBJECT); - - if (hashtable_init(&object->hashtable)) { - jsonp_free(object); - return NULL; - } - - return &object->json; -} - -static void json_delete_object(json_object_t *object) { - hashtable_close(&object->hashtable); - jsonp_free(object); -} - -size_t json_object_size(const json_t *json) { - json_object_t *object; - - if (!json_is_object(json)) - return 0; - - object = json_to_object(json); - return object->hashtable.size; -} - -json_t *json_object_get(const json_t *json, const char *key) { - if (!key) - return NULL; - - return json_object_getn(json, key, strlen(key)); -} - -json_t *json_object_getn(const 
json_t *json, const char *key, size_t key_len) { - json_object_t *object; - - if (!key || !json_is_object(json)) - return NULL; - - object = json_to_object(json); - return hashtable_get(&object->hashtable, key, key_len); -} - -int json_object_set_new_nocheck(json_t *json, const char *key, json_t *value) { - if (!key) { - json_decref(value); - return -1; - } - return json_object_setn_new_nocheck(json, key, strlen(key), value); -} - -int json_object_setn_new_nocheck(json_t *json, const char *key, size_t key_len, - json_t *value) { - json_object_t *object; - - if (!value) - return -1; - - if (!key || !json_is_object(json) || json == value) { - json_decref(value); - return -1; - } - object = json_to_object(json); - - if (hashtable_set(&object->hashtable, key, key_len, value)) { - json_decref(value); - return -1; - } - - return 0; -} - -int json_object_set_new(json_t *json, const char *key, json_t *value) { - if (!key) { - json_decref(value); - return -1; - } - - return json_object_setn_new(json, key, strlen(key), value); -} - -int json_object_setn_new(json_t *json, const char *key, size_t key_len, json_t *value) { - if (!key || !utf8_check_string(key, key_len)) { - json_decref(value); - return -1; - } - - return json_object_setn_new_nocheck(json, key, key_len, value); -} - -int json_object_del(json_t *json, const char *key) { - if (!key) - return -1; - - return json_object_deln(json, key, strlen(key)); -} - -int json_object_deln(json_t *json, const char *key, size_t key_len) { - json_object_t *object; - - if (!key || !json_is_object(json)) - return -1; - - object = json_to_object(json); - return hashtable_del(&object->hashtable, key, key_len); -} - -int json_object_clear(json_t *json) { - json_object_t *object; - - if (!json_is_object(json)) - return -1; - - object = json_to_object(json); - hashtable_clear(&object->hashtable); - - return 0; -} - -int json_object_update(json_t *object, json_t *other) { - const char *key; - json_t *value; - - if (!json_is_object(object) 
|| !json_is_object(other)) - return -1; - - json_object_foreach(other, key, value) { - if (json_object_set_nocheck(object, key, value)) - return -1; - } - - return 0; -} - -int json_object_update_existing(json_t *object, json_t *other) { - const char *key; - size_t key_len; - json_t *value; - - if (!json_is_object(object) || !json_is_object(other)) - return -1; - - json_object_keylen_foreach(other, key, key_len, value) { - if (json_object_getn(object, key, key_len)) - json_object_setn_nocheck(object, key, key_len, value); - } - - return 0; -} - -int json_object_update_missing(json_t *object, json_t *other) { - const char *key; - json_t *value; - - if (!json_is_object(object) || !json_is_object(other)) - return -1; - - json_object_foreach(other, key, value) { - if (!json_object_get(object, key)) - json_object_set_nocheck(object, key, value); - } - - return 0; -} - -int do_object_update_recursive(json_t *object, json_t *other, hashtable_t *parents) { - const char *key; - size_t key_len; - json_t *value; - char loop_key[LOOP_KEY_LEN]; - int res = 0; - size_t loop_key_len; - - if (!json_is_object(object) || !json_is_object(other)) - return -1; - - if (jsonp_loop_check(parents, other, loop_key, sizeof(loop_key), &loop_key_len)) - return -1; - - json_object_keylen_foreach(other, key, key_len, value) { - json_t *v = json_object_get(object, key); - - if (json_is_object(v) && json_is_object(value)) { - if (do_object_update_recursive(v, value, parents)) { - res = -1; - break; - } - } else { - if (json_object_setn_nocheck(object, key, key_len, value)) { - res = -1; - break; - } - } - } - - hashtable_del(parents, loop_key, loop_key_len); - - return res; -} - -int json_object_update_recursive(json_t *object, json_t *other) { - int res; - hashtable_t parents_set; - - if (hashtable_init(&parents_set)) - return -1; - res = do_object_update_recursive(object, other, &parents_set); - hashtable_close(&parents_set); - - return res; -} - -void *json_object_iter(json_t *json) { - 
json_object_t *object; - - if (!json_is_object(json)) - return NULL; - - object = json_to_object(json); - return hashtable_iter(&object->hashtable); -} - -void *json_object_iter_at(json_t *json, const char *key) { - json_object_t *object; - - if (!key || !json_is_object(json)) - return NULL; - - object = json_to_object(json); - return hashtable_iter_at(&object->hashtable, key, strlen(key)); -} - -void *json_object_iter_next(json_t *json, void *iter) { - json_object_t *object; - - if (!json_is_object(json) || iter == NULL) - return NULL; - - object = json_to_object(json); - return hashtable_iter_next(&object->hashtable, iter); -} - -const char *json_object_iter_key(void *iter) { - if (!iter) - return NULL; - - return hashtable_iter_key(iter); -} - -size_t json_object_iter_key_len(void *iter) { - if (!iter) - return 0; - - return hashtable_iter_key_len(iter); -} - -json_t *json_object_iter_value(void *iter) { - if (!iter) - return NULL; - - return (json_t *)hashtable_iter_value(iter); -} - -int json_object_iter_set_new(json_t *json, void *iter, json_t *value) { - if (!json_is_object(json) || !iter || !value) { - json_decref(value); - return -1; - } - - hashtable_iter_set(iter, value); - return 0; -} - -void *json_object_key_to_iter(const char *key) { - if (!key) - return NULL; - - return hashtable_key_to_iter(key); -} - -static int json_object_equal(const json_t *object1, const json_t *object2) { - const char *key; - const json_t *value1, *value2; - - if (json_object_size(object1) != json_object_size(object2)) - return 0; - - json_object_foreach((json_t *)object1, key, value1) { - value2 = json_object_get(object2, key); - - if (!json_equal(value1, value2)) - return 0; - } - - return 1; -} - -static json_t *json_object_copy(json_t *object) { - json_t *result; - - const char *key; - json_t *value; - - result = json_object(); - if (!result) - return NULL; - - json_object_foreach(object, key, value) json_object_set_nocheck(result, key, value); - - return result; -} - 
-static json_t *json_object_deep_copy(const json_t *object, hashtable_t *parents) { - json_t *result; - void *iter; - char loop_key[LOOP_KEY_LEN]; - size_t loop_key_len; - - if (jsonp_loop_check(parents, object, loop_key, sizeof(loop_key), &loop_key_len)) - return NULL; - - result = json_object(); - if (!result) - goto out; - - /* Cannot use json_object_foreach because object has to be cast - non-const */ - iter = json_object_iter((json_t *)object); - while (iter) { - const char *key; - const json_t *value; - key = json_object_iter_key(iter); - value = json_object_iter_value(iter); - - if (json_object_set_new_nocheck(result, key, do_deep_copy(value, parents))) { - json_decref(result); - result = NULL; - break; - } - iter = json_object_iter_next((json_t *)object, iter); - } - -out: - hashtable_del(parents, loop_key, loop_key_len); - - return result; -} - -/*** array ***/ - -json_t *json_array(void) { - json_array_t *array = jsonp_malloc(sizeof(json_array_t)); - if (!array) - return NULL; - json_init(&array->json, JSON_ARRAY); - - array->entries = 0; - array->size = 8; - - array->table = jsonp_malloc(array->size * sizeof(json_t *)); - if (!array->table) { - jsonp_free(array); - return NULL; - } - - return &array->json; -} - -static void json_delete_array(json_array_t *array) { - size_t i; - - for (i = 0; i < array->entries; i++) - json_decref(array->table[i]); - - jsonp_free(array->table); - jsonp_free(array); -} - -size_t json_array_size(const json_t *json) { - if (!json_is_array(json)) - return 0; - - return json_to_array(json)->entries; -} - -json_t *json_array_get(const json_t *json, size_t index) { - json_array_t *array; - if (!json_is_array(json)) - return NULL; - array = json_to_array(json); - - if (index >= array->entries) - return NULL; - - return array->table[index]; -} - -int json_array_set_new(json_t *json, size_t index, json_t *value) { - json_array_t *array; - - if (!value) - return -1; - - if (!json_is_array(json) || json == value) { - 
json_decref(value); - return -1; - } - array = json_to_array(json); - - if (index >= array->entries) { - json_decref(value); - return -1; - } - - json_decref(array->table[index]); - array->table[index] = value; - - return 0; -} - -static void array_move(json_array_t *array, size_t dest, size_t src, size_t count) { - memmove(&array->table[dest], &array->table[src], count * sizeof(json_t *)); -} - -static void array_copy(json_t **dest, size_t dpos, json_t **src, size_t spos, - size_t count) { - memcpy(&dest[dpos], &src[spos], count * sizeof(json_t *)); -} - -static json_t **json_array_grow(json_array_t *array, size_t amount, int copy) { - size_t new_size; - json_t **old_table, **new_table; - - if (array->entries + amount <= array->size) - return array->table; - - old_table = array->table; - - new_size = max(array->size + amount, array->size * 2); - new_table = jsonp_malloc(new_size * sizeof(json_t *)); - if (!new_table) - return NULL; - - array->size = new_size; - array->table = new_table; - - if (copy) { - array_copy(array->table, 0, old_table, 0, array->entries); - jsonp_free(old_table); - return array->table; - } - - return old_table; -} - -int json_array_append_new(json_t *json, json_t *value) { - json_array_t *array; - - if (!value) - return -1; - - if (!json_is_array(json) || json == value) { - json_decref(value); - return -1; - } - array = json_to_array(json); - - if (!json_array_grow(array, 1, 1)) { - json_decref(value); - return -1; - } - - array->table[array->entries] = value; - array->entries++; - - return 0; -} - -int json_array_insert_new(json_t *json, size_t index, json_t *value) { - json_array_t *array; - json_t **old_table; - - if (!value) - return -1; - - if (!json_is_array(json) || json == value) { - json_decref(value); - return -1; - } - array = json_to_array(json); - - if (index > array->entries) { - json_decref(value); - return -1; - } - - old_table = json_array_grow(array, 1, 0); - if (!old_table) { - json_decref(value); - return -1; - } - - if 
(old_table != array->table) { - array_copy(array->table, 0, old_table, 0, index); - array_copy(array->table, index + 1, old_table, index, array->entries - index); - jsonp_free(old_table); - } else - array_move(array, index + 1, index, array->entries - index); - - array->table[index] = value; - array->entries++; - - return 0; -} - -int json_array_remove(json_t *json, size_t index) { - json_array_t *array; - - if (!json_is_array(json)) - return -1; - array = json_to_array(json); - - if (index >= array->entries) - return -1; - - json_decref(array->table[index]); - - /* If we're removing the last element, nothing has to be moved */ - if (index < array->entries - 1) - array_move(array, index, index + 1, array->entries - index - 1); - - array->entries--; - - return 0; -} - -int json_array_clear(json_t *json) { - json_array_t *array; - size_t i; - - if (!json_is_array(json)) - return -1; - array = json_to_array(json); - - for (i = 0; i < array->entries; i++) - json_decref(array->table[i]); - - array->entries = 0; - return 0; -} - -int json_array_extend(json_t *json, json_t *other_json) { - json_array_t *array, *other; - size_t i; - - if (!json_is_array(json) || !json_is_array(other_json)) - return -1; - array = json_to_array(json); - other = json_to_array(other_json); - - if (!json_array_grow(array, other->entries, 1)) - return -1; - - for (i = 0; i < other->entries; i++) - json_incref(other->table[i]); - - array_copy(array->table, array->entries, other->table, 0, other->entries); - - array->entries += other->entries; - return 0; -} - -static int json_array_equal(const json_t *array1, const json_t *array2) { - size_t i, size; - - size = json_array_size(array1); - if (size != json_array_size(array2)) - return 0; - - for (i = 0; i < size; i++) { - json_t *value1, *value2; - - value1 = json_array_get(array1, i); - value2 = json_array_get(array2, i); - - if (!json_equal(value1, value2)) - return 0; - } - - return 1; -} - -static json_t *json_array_copy(json_t *array) { - 
json_t *result; - size_t i; - - result = json_array(); - if (!result) - return NULL; - - for (i = 0; i < json_array_size(array); i++) - json_array_append(result, json_array_get(array, i)); - - return result; -} - -static json_t *json_array_deep_copy(const json_t *array, hashtable_t *parents) { - json_t *result; - size_t i; - char loop_key[LOOP_KEY_LEN]; - size_t loop_key_len; - - if (jsonp_loop_check(parents, array, loop_key, sizeof(loop_key), &loop_key_len)) - return NULL; - - result = json_array(); - if (!result) - goto out; - - for (i = 0; i < json_array_size(array); i++) { - if (json_array_append_new(result, - do_deep_copy(json_array_get(array, i), parents))) { - json_decref(result); - result = NULL; - break; - } - } - -out: - hashtable_del(parents, loop_key, loop_key_len); - - return result; -} - -/*** string ***/ - -static json_t *string_create(const char *value, size_t len, int own) { - char *v; - json_string_t *string; - - if (!value) - return NULL; - - if (own) - v = (char *)value; - else { - v = jsonp_strndup(value, len); - if (!v) - return NULL; - } - - string = jsonp_malloc(sizeof(json_string_t)); - if (!string) { - jsonp_free(v); - return NULL; - } - json_init(&string->json, JSON_STRING); - string->value = v; - string->length = len; - - return &string->json; -} - -json_t *json_string_nocheck(const char *value) { - if (!value) - return NULL; - - return string_create(value, strlen(value), 0); -} - -json_t *json_stringn_nocheck(const char *value, size_t len) { - return string_create(value, len, 0); -} - -/* this is private; "steal" is not a public API concept */ -json_t *jsonp_stringn_nocheck_own(const char *value, size_t len) { - return string_create(value, len, 1); -} - -json_t *json_string(const char *value) { - if (!value) - return NULL; - - return json_stringn(value, strlen(value)); -} - -json_t *json_stringn(const char *value, size_t len) { - if (!value || !utf8_check_string(value, len)) - return NULL; - - return json_stringn_nocheck(value, len); -} 
- -const char *json_string_value(const json_t *json) { - if (!json_is_string(json)) - return NULL; - - return json_to_string(json)->value; -} - -size_t json_string_length(const json_t *json) { - if (!json_is_string(json)) - return 0; - - return json_to_string(json)->length; -} - -int json_string_set_nocheck(json_t *json, const char *value) { - if (!value) - return -1; - - return json_string_setn_nocheck(json, value, strlen(value)); -} - -int json_string_setn_nocheck(json_t *json, const char *value, size_t len) { - char *dup; - json_string_t *string; - - if (!json_is_string(json) || !value) - return -1; - - dup = jsonp_strndup(value, len); - if (!dup) - return -1; - - string = json_to_string(json); - jsonp_free(string->value); - string->value = dup; - string->length = len; - - return 0; -} - -int json_string_set(json_t *json, const char *value) { - if (!value) - return -1; - - return json_string_setn(json, value, strlen(value)); -} - -int json_string_setn(json_t *json, const char *value, size_t len) { - if (!value || !utf8_check_string(value, len)) - return -1; - - return json_string_setn_nocheck(json, value, len); -} - -static void json_delete_string(json_string_t *string) { - jsonp_free(string->value); - jsonp_free(string); -} - -static int json_string_equal(const json_t *string1, const json_t *string2) { - json_string_t *s1, *s2; - - s1 = json_to_string(string1); - s2 = json_to_string(string2); - return s1->length == s2->length && !memcmp(s1->value, s2->value, s1->length); -} - -static json_t *json_string_copy(const json_t *string) { - json_string_t *s; - - s = json_to_string(string); - return json_stringn_nocheck(s->value, s->length); -} - -json_t *json_vsprintf(const char *fmt, va_list ap) { - json_t *json = NULL; - int length; - char *buf; - va_list aq; - va_copy(aq, ap); - - length = vsnprintf(NULL, 0, fmt, ap); - if (length < 0) - goto out; - if (length == 0) { - json = json_string(""); - goto out; - } - - buf = jsonp_malloc((size_t)length + 1); - if (!buf) 
- goto out; - - vsnprintf(buf, (size_t)length + 1, fmt, aq); - if (!utf8_check_string(buf, length)) { - jsonp_free(buf); - goto out; - } - - json = jsonp_stringn_nocheck_own(buf, length); - -out: - va_end(aq); - return json; -} - -json_t *json_sprintf(const char *fmt, ...) { - json_t *result; - va_list ap; - - va_start(ap, fmt); - result = json_vsprintf(fmt, ap); - va_end(ap); - - return result; -} - -/*** integer ***/ - -json_t *json_integer(json_int_t value) { - json_integer_t *integer = jsonp_malloc(sizeof(json_integer_t)); - if (!integer) - return NULL; - json_init(&integer->json, JSON_INTEGER); - - integer->value = value; - return &integer->json; -} - -json_int_t json_integer_value(const json_t *json) { - if (!json_is_integer(json)) - return 0; - - return json_to_integer(json)->value; -} - -int json_integer_set(json_t *json, json_int_t value) { - if (!json_is_integer(json)) - return -1; - - json_to_integer(json)->value = value; - - return 0; -} - -static void json_delete_integer(json_integer_t *integer) { jsonp_free(integer); } - -static int json_integer_equal(const json_t *integer1, const json_t *integer2) { - return json_integer_value(integer1) == json_integer_value(integer2); -} - -static json_t *json_integer_copy(const json_t *integer) { - return json_integer(json_integer_value(integer)); -} - -/*** real ***/ - -json_t *json_real(double value) { - json_real_t *real; - - if (isnan(value) || isinf(value)) - return NULL; - - real = jsonp_malloc(sizeof(json_real_t)); - if (!real) - return NULL; - json_init(&real->json, JSON_REAL); - - real->value = value; - return &real->json; -} - -double json_real_value(const json_t *json) { - if (!json_is_real(json)) - return 0; - - return json_to_real(json)->value; -} - -int json_real_set(json_t *json, double value) { - if (!json_is_real(json) || isnan(value) || isinf(value)) - return -1; - - json_to_real(json)->value = value; - - return 0; -} - -static void json_delete_real(json_real_t *real) { jsonp_free(real); } - 
-static int json_real_equal(const json_t *real1, const json_t *real2) { - return json_real_value(real1) == json_real_value(real2); -} - -static json_t *json_real_copy(const json_t *real) { - return json_real(json_real_value(real)); -} - -/*** number ***/ - -double json_number_value(const json_t *json) { - if (json_is_integer(json)) - return (double)json_integer_value(json); - else if (json_is_real(json)) - return json_real_value(json); - else - return 0.0; -} - -/*** simple values ***/ - -json_t *json_true(void) { - static json_t the_true = {JSON_TRUE, (size_t)-1}; - return &the_true; -} - -json_t *json_false(void) { - static json_t the_false = {JSON_FALSE, (size_t)-1}; - return &the_false; -} - -json_t *json_null(void) { - static json_t the_null = {JSON_NULL, (size_t)-1}; - return &the_null; -} - -/*** deletion ***/ - -void json_delete(json_t *json) { - if (!json) - return; - - switch (json_typeof(json)) { - case JSON_OBJECT: - json_delete_object(json_to_object(json)); - break; - case JSON_ARRAY: - json_delete_array(json_to_array(json)); - break; - case JSON_STRING: - json_delete_string(json_to_string(json)); - break; - case JSON_INTEGER: - json_delete_integer(json_to_integer(json)); - break; - case JSON_REAL: - json_delete_real(json_to_real(json)); - break; - default: - return; - } - - /* json_delete is not called for true, false or null */ -} - -/*** equality ***/ - -int json_equal(const json_t *json1, const json_t *json2) { - if (!json1 || !json2) - return 0; - - if (json_typeof(json1) != json_typeof(json2)) - return 0; - - /* this covers true, false and null as they are singletons */ - if (json1 == json2) - return 1; - - switch (json_typeof(json1)) { - case JSON_OBJECT: - return json_object_equal(json1, json2); - case JSON_ARRAY: - return json_array_equal(json1, json2); - case JSON_STRING: - return json_string_equal(json1, json2); - case JSON_INTEGER: - return json_integer_equal(json1, json2); - case JSON_REAL: - return json_real_equal(json1, json2); - 
default: - return 0; - } -} - -/*** copying ***/ - -json_t *json_copy(json_t *json) { - if (!json) - return NULL; - - switch (json_typeof(json)) { - case JSON_OBJECT: - return json_object_copy(json); - case JSON_ARRAY: - return json_array_copy(json); - case JSON_STRING: - return json_string_copy(json); - case JSON_INTEGER: - return json_integer_copy(json); - case JSON_REAL: - return json_real_copy(json); - case JSON_TRUE: - case JSON_FALSE: - case JSON_NULL: - return json; - default: - return NULL; - } -} - -json_t *json_deep_copy(const json_t *json) { - json_t *res; - hashtable_t parents_set; - - if (hashtable_init(&parents_set)) - return NULL; - res = do_deep_copy(json, &parents_set); - hashtable_close(&parents_set); - - return res; -} - -json_t *do_deep_copy(const json_t *json, hashtable_t *parents) { - if (!json) - return NULL; - - switch (json_typeof(json)) { - case JSON_OBJECT: - return json_object_deep_copy(json, parents); - case JSON_ARRAY: - return json_array_deep_copy(json, parents); - /* for the rest of the types, deep copying doesn't differ from - shallow copying */ - case JSON_STRING: - return json_string_copy(json); - case JSON_INTEGER: - return json_integer_copy(json); - case JSON_REAL: - return json_real_copy(json); - case JSON_TRUE: - case JSON_FALSE: - case JSON_NULL: - return (json_t *)json; - default: - return NULL; - } -} diff --git a/solo-ckpool-source/src/jansson-2.14/src/version.c b/solo-ckpool-source/src/jansson-2.14/src/version.c deleted file mode 100644 index f1026af..0000000 --- a/solo-ckpool-source/src/jansson-2.14/src/version.c +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright (c) 2019 Sean Bright - * - * Jansson is free software; you can redistribute it and/or modify - * it under the terms of the MIT license. See LICENSE for details. 
- */ - -#ifndef _GNU_SOURCE -#define _GNU_SOURCE -#endif - -#include "jansson.h" - -const char *jansson_version_str(void) { return JANSSON_VERSION; } - -int jansson_version_cmp(int major, int minor, int micro) { - int diff; - - if ((diff = JANSSON_MAJOR_VERSION - major)) { - return diff; - } - - if ((diff = JANSSON_MINOR_VERSION - minor)) { - return diff; - } - - return JANSSON_MICRO_VERSION - micro; -} diff --git a/solo-ckpool-source/src/jansson-2.14/test-driver b/solo-ckpool-source/src/jansson-2.14/test-driver deleted file mode 100755 index be73b80..0000000 --- a/solo-ckpool-source/src/jansson-2.14/test-driver +++ /dev/null @@ -1,153 +0,0 @@ -#! /bin/sh -# test-driver - basic testsuite driver script. - -scriptversion=2018-03-07.03; # UTC - -# Copyright (C) 2011-2021 Free Software Foundation, Inc. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2, or (at your option) -# any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program that contains a -# configuration script generated by Autoconf, you may include it under -# the same distribution terms that you use for the rest of that program. - -# This file is maintained in Automake, please report -# bugs to or send patches to -# . - -# Make unconditional expansion of undefined variables an error. This -# helps a lot in preventing typo-related bugs. 
-set -u - -usage_error () -{ - echo "$0: $*" >&2 - print_usage >&2 - exit 2 -} - -print_usage () -{ - cat <"$log_file" -"$@" >>"$log_file" 2>&1 -estatus=$? - -if test $enable_hard_errors = no && test $estatus -eq 99; then - tweaked_estatus=1 -else - tweaked_estatus=$estatus -fi - -case $tweaked_estatus:$expect_failure in - 0:yes) col=$red res=XPASS recheck=yes gcopy=yes;; - 0:*) col=$grn res=PASS recheck=no gcopy=no;; - 77:*) col=$blu res=SKIP recheck=no gcopy=yes;; - 99:*) col=$mgn res=ERROR recheck=yes gcopy=yes;; - *:yes) col=$lgn res=XFAIL recheck=no gcopy=yes;; - *:*) col=$red res=FAIL recheck=yes gcopy=yes;; -esac - -# Report the test outcome and exit status in the logs, so that one can -# know whether the test passed or failed simply by looking at the '.log' -# file, without the need of also peaking into the corresponding '.trs' -# file (automake bug#11814). -echo "$res $test_name (exit status: $estatus)" >>"$log_file" - -# Report outcome to console. -echo "${col}${res}${std}: $test_name" - -# Register the test result, and other relevant metadata. 
-echo ":test-result: $res" > $trs_file -echo ":global-test-result: $res" >> $trs_file -echo ":recheck: $recheck" >> $trs_file -echo ":copy-in-global-log: $gcopy" >> $trs_file - -# Local Variables: -# mode: shell-script -# sh-indentation: 2 -# eval: (add-hook 'before-save-hook 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC0" -# time-stamp-end: "; # UTC" -# End: diff --git a/solo-ckpool-source/src/libckpool.c b/solo-ckpool-source/src/libckpool.c deleted file mode 100644 index 3bfe9a5..0000000 --- a/solo-ckpool-source/src/libckpool.c +++ /dev/null @@ -1,2258 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include -#include -#ifdef HAVE_LINUX_UN_H -#include -#else -#include -#endif -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "libckpool.h" -#include "sha2.h" -#include "utlist.h" - -#ifndef UNIX_PATH_MAX -#define UNIX_PATH_MAX 108 -#endif - -/* We use a weak function as a simple printf within the library that can be - * overridden by however the outside executable wishes to do its logging. */ -void __attribute__((weak)) logmsg(int __maybe_unused loglevel, const char *fmt, ...) 
-{ - va_list ap; - char *buf; - - va_start(ap, fmt); - VASPRINTF(&buf, fmt, ap); - va_end(ap); - - printf("%s\n", buf); - free(buf); -} - -void rename_proc(const char *name) -{ - char buf[16]; - - snprintf(buf, 15, "ckp@%s", name); - buf[15] = '\0'; - prctl(PR_SET_NAME, buf, 0, 0, 0); -} - -void create_pthread(pthread_t *thread, void *(*start_routine)(void *), void *arg) -{ - int ret = pthread_create(thread, NULL, start_routine, arg); - - if (unlikely(ret)) - quit(1, "Failed to pthread_create"); -} - -void join_pthread(pthread_t thread) -{ - if (!pthread_kill(thread, 0)) - pthread_join(thread, NULL); -} - -struct ck_completion { - sem_t sem; - void (*fn)(void *fnarg); - void *fnarg; -}; - -static void *completion_thread(void *arg) -{ - struct ck_completion *ckc = (struct ck_completion *)arg; - - ckc->fn(ckc->fnarg); - cksem_post(&ckc->sem); - - return NULL; -} - -bool ck_completion_timeout(void *fn, void *fnarg, int timeout) -{ - struct ck_completion ckc; - pthread_t pthread; - bool ret = false; - - cksem_init(&ckc.sem); - ckc.fn = fn; - ckc.fnarg = fnarg; - - pthread_create(&pthread, NULL, completion_thread, (void *)&ckc); - - ret = cksem_mswait(&ckc.sem, timeout); - if (!ret) - pthread_join(pthread, NULL); - else - pthread_cancel(pthread); - return !ret; -} - -int _cond_wait(pthread_cond_t *cond, mutex_t *lock, const char *file, const char *func, const int line) -{ - int ret; - - ret = pthread_cond_wait(cond, &lock->mutex); - lock->file = file; - lock->func = func; - lock->line = line; - return ret; -} - -int _cond_timedwait(pthread_cond_t *cond, mutex_t *lock, const struct timespec *abstime, const char *file, const char *func, const int line) -{ - int ret; - - ret = pthread_cond_timedwait(cond, &lock->mutex, abstime); - lock->file = file; - lock->func = func; - lock->line = line; - return ret; -} - - -int _mutex_timedlock(mutex_t *lock, int timeout, const char *file, const char *func, const int line) -{ - tv_t now; - ts_t abs; - int ret; - - tv_time(&now); - 
tv_to_ts(&abs, &now); - abs.tv_sec += timeout; - - ret = pthread_mutex_timedlock(&lock->mutex, &abs); - if (!ret) { - lock->file = file; - lock->func = func; - lock->line = line; - } - - return ret; -} - -/* Make every locking attempt warn if we're unable to get the lock for more - * than 10 seconds and fail if we can't get it for longer than a minute. */ -void _mutex_lock(mutex_t *lock, const char *file, const char *func, const int line) -{ - int ret, retries = 0; - -retry: - ret = _mutex_timedlock(lock, 10, file, func, line); - if (unlikely(ret)) { - if (likely(ret == ETIMEDOUT)) { - LOGERR("WARNING: Prolonged mutex lock contention from %s %s:%d, held by %s %s:%d", - file, func, line, lock->file, lock->func, lock->line); - if (++retries < 6) - goto retry; - quitfrom(1, file, func, line, "FAILED TO GRAB MUTEX!"); - } - quitfrom(1, file, func, line, "WTF MUTEX ERROR ON LOCK!"); - } -} - -/* Does not unset lock->file/func/line since they're only relevant when the lock is held */ -void _mutex_unlock(mutex_t *lock, const char *file, const char *func, const int line) -{ - if (unlikely(pthread_mutex_unlock(&lock->mutex))) - quitfrom(1, file, func, line, "WTF MUTEX ERROR ON UNLOCK!"); -} - -int _mutex_trylock(mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) -{ - int ret; - - ret = pthread_mutex_trylock(&lock->mutex); - if (!ret) { - lock->file = file; - lock->func = func; - lock->line = line; - } - return ret; -} - -void mutex_destroy(mutex_t *lock) -{ - pthread_mutex_destroy(&lock->mutex); -} - - -static int wr_timedlock(pthread_rwlock_t *lock, int timeout) -{ - tv_t now; - ts_t abs; - int ret; - - tv_time(&now); - tv_to_ts(&abs, &now); - abs.tv_sec += timeout; - - ret = pthread_rwlock_timedwrlock(lock, &abs); - - return ret; -} - -void _wr_lock(rwlock_t *lock, const char *file, const char *func, const int line) -{ - int ret, retries = 0; - -retry: - ret = wr_timedlock(&lock->rwlock, 10); - if 
(unlikely(ret)) { - if (likely(ret == ETIMEDOUT)) { - LOGERR("WARNING: Prolonged write lock contention from %s %s:%d, held by %s %s:%d", - file, func, line, lock->file, lock->func, lock->line); - if (++retries < 6) - goto retry; - quitfrom(1, file, func, line, "FAILED TO GRAB WRITE LOCK!"); - } - quitfrom(1, file, func, line, "WTF ERROR ON WRITE LOCK!"); - } - lock->file = file; - lock->func = func; - lock->line = line; -} - -int _wr_trylock(rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line) -{ - int ret = pthread_rwlock_trywrlock(&lock->rwlock); - - if (!ret) { - lock->file = file; - lock->func = func; - lock->line = line; - } - return ret; -} - -static int rd_timedlock(pthread_rwlock_t *lock, int timeout) -{ - tv_t now; - ts_t abs; - int ret; - - tv_time(&now); - tv_to_ts(&abs, &now); - abs.tv_sec += timeout; - - ret = pthread_rwlock_timedrdlock(lock, &abs); - - return ret; -} - -void _rd_lock(rwlock_t *lock, const char *file, const char *func, const int line) -{ - int ret, retries = 0; - -retry: - ret = rd_timedlock(&lock->rwlock, 10); - if (unlikely(ret)) { - if (likely(ret == ETIMEDOUT)) { - LOGERR("WARNING: Prolonged read lock contention from %s %s:%d, held by %s %s:%d", - file, func, line, lock->file, lock->func, lock->line); - if (++retries < 6) - goto retry; - quitfrom(1, file, func, line, "FAILED TO GRAB READ LOCK!"); - } - quitfrom(1, file, func, line, "WTF ERROR ON READ LOCK!"); - } - lock->file = file; - lock->func = func; - lock->line = line; -} - -void _rw_unlock(rwlock_t *lock, const char *file, const char *func, const int line) -{ - if (unlikely(pthread_rwlock_unlock(&lock->rwlock))) - quitfrom(1, file, func, line, "WTF RWLOCK ERROR ON UNLOCK!"); -} - -void _rd_unlock(rwlock_t *lock, const char *file, const char *func, const int line) -{ - _rw_unlock(lock, file, func, line); -} - -void _wr_unlock(rwlock_t *lock, const char *file, const char *func, const int line) -{ - _rw_unlock(lock, 
file, func, line); -} - -void _mutex_init(mutex_t *lock, const char *file, const char *func, const int line) -{ - if (unlikely(pthread_mutex_init(&lock->mutex, NULL))) - quitfrom(1, file, func, line, "Failed to pthread_mutex_init"); -} - -void _rwlock_init(rwlock_t *lock, const char *file, const char *func, const int line) -{ - if (unlikely(pthread_rwlock_init(&lock->rwlock, NULL))) - quitfrom(1, file, func, line, "Failed to pthread_rwlock_init"); -} - - -void _cond_init(pthread_cond_t *cond, const char *file, const char *func, const int line) -{ - if (unlikely(pthread_cond_init(cond, NULL))) - quitfrom(1, file, func, line, "Failed to pthread_cond_init!"); -} - -void _cklock_init(cklock_t *lock, const char *file, const char *func, const int line) -{ - _mutex_init(&lock->mutex, file, func, line); - _rwlock_init(&lock->rwlock, file, func, line); -} - - -/* Read lock variant of cklock. Cannot be promoted. */ -void _ck_rlock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _mutex_lock(&lock->mutex, file, func, line); - _rd_lock(&lock->rwlock, file, func, line); - _mutex_unlock(&lock->mutex, file, func, line); -} - -/* Write lock variant of cklock */ -void _ck_wlock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _mutex_lock(&lock->mutex, file, func, line); - _wr_lock(&lock->rwlock, file, func, line); -} - -/* Downgrade write variant to a read lock */ -void _ck_dwlock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _wr_unlock(&lock->rwlock, file, func, line); - _rd_lock(&lock->rwlock, file, func, line); - _mutex_unlock(&lock->mutex, file, func, line); -} - -/* Demote a write variant to an intermediate variant */ -void _ck_dwilock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _wr_unlock(&lock->rwlock, file, func, line); -} - -void _ck_runlock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _rd_unlock(&lock->rwlock, file, func, line); -} - -void 
_ck_wunlock(cklock_t *lock, const char *file, const char *func, const int line) -{ - _wr_unlock(&lock->rwlock, file, func, line); - _mutex_unlock(&lock->mutex, file, func, line); -} - -void cklock_destroy(cklock_t *lock) -{ - pthread_rwlock_destroy(&lock->rwlock.rwlock); - pthread_mutex_destroy(&lock->mutex.mutex); -} - - -void _cksem_init(sem_t *sem, const char *file, const char *func, const int line) -{ - int ret; - if ((ret = sem_init(sem, 0, 0))) - quitfrom(1, file, func, line, "Failed to sem_init ret=%d errno=%d", ret, errno); -} - -void _cksem_post(sem_t *sem, const char *file, const char *func, const int line) -{ - if (unlikely(sem_post(sem))) - quitfrom(1, file, func, line, "Failed to sem_post errno=%d sem=0x%p", errno, sem); -} - -void _cksem_wait(sem_t *sem, const char *file, const char *func, const int line) -{ - if (unlikely(sem_wait(sem))) { - if (errno == EINTR) - return; - quitfrom(1, file, func, line, "Failed to sem_wait errno=%d sem=0x%p", errno, sem); - } -} - -int _cksem_trywait(sem_t *sem, const char *file, const char *func, const int line) -{ - int ret = sem_trywait(sem); - - if (unlikely(ret && errno != EAGAIN && errno != EINTR)) - quitfrom(1, file, func, line, "Failed to sem_trywait errno=%d sem=0x%p", errno, sem); - return ret; -} - -int _cksem_mswait(sem_t *sem, int ms, const char *file, const char *func, const int line) -{ - ts_t abs_timeout, ts_now; - tv_t tv_now; - int ret; - - tv_time(&tv_now); - tv_to_ts(&ts_now, &tv_now); - ms_to_ts(&abs_timeout, ms); - timeraddspec(&abs_timeout, &ts_now); - ret = sem_timedwait(sem, &abs_timeout); - - if (ret) { - if (likely(errno == ETIMEDOUT)) - return ETIMEDOUT; - if (errno == EINTR) - return EINTR; - quitfrom(1, file, func, line, "Failed to sem_timedwait errno=%d sem=0x%p", errno, sem); - } - return 0; -} - -void _cksem_destroy(sem_t *sem, const char *file, const char *func, const int line) -{ - - if (unlikely(sem_destroy(sem))) - quitfrom(1, file, func, line, "Failed to sem_destroy errno=%d 
sem=0x%p", errno, sem); -} - -/* Extract just the url and port information from a url string, allocating - * heap memory for sockaddr_url and sockaddr_port. */ -bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port) -{ - char *url_begin, *url_end, *ipv6_begin, *ipv6_end, *port_start = NULL; - char *url_address, *port, *tmp; - int url_len, port_len = 0; - size_t hlen; - - if (!url) { - LOGWARNING("Null length url string passed to extract_sockaddr"); - return false; - } - url_begin = strstr(url, "//"); - if (!url_begin) - url_begin = url; - else - url_begin += 2; - - /* Look for numeric ipv6 entries */ - ipv6_begin = strstr(url_begin, "["); - ipv6_end = strstr(url_begin, "]"); - if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin) - url_end = strstr(ipv6_end, ":"); - else - url_end = strstr(url_begin, ":"); - if (url_end) { - url_len = url_end - url_begin; - port_len = strlen(url_begin) - url_len - 1; - if (port_len < 1) - return false; - port_start = url_end + 1; - } else - url_len = strlen(url_begin); - - /* Get rid of the [] */ - if (ipv6_begin && ipv6_end && ipv6_end > ipv6_begin){ - url_len -= 2; - url_begin++; - } - - if (url_len < 1) { - LOGWARNING("Null length URL passed to extract_sockaddr"); - return false; - } - - hlen = url_len + 1; - url_address = ckalloc(hlen); - sprintf(url_address, "%.*s", url_len, url_begin); - - port = ckalloc(8); - if (port_len) { - char *slash; - - snprintf(port, 6, "%.*s", port_len, port_start); - slash = strchr(port, '/'); - if (slash) - *slash = '\0'; - } else - strcpy(port, "80"); - - /* - * This function may be called with sockaddr_* already set as it may - * be getting updated so we need to free the old entries safely. 
- * Use a temporary variable so they never dereference */ - if (*sockaddr_port && !safecmp(*sockaddr_port, port)) - free(port); - else { - tmp = *sockaddr_port; - *sockaddr_port = port; - free(tmp); - } - if (*sockaddr_url && !safecmp(*sockaddr_url, url_address)) - free(url_address); - else { - tmp = *sockaddr_url; - *sockaddr_url = url_address; - free(tmp); - } - - return true; -} - -/* Convert a sockaddr structure into a url and port. URL should be a string of - * INET6_ADDRSTRLEN size, port at least a string of 6 bytes */ -bool url_from_sockaddr(const struct sockaddr *addr, char *url, char *port) -{ - int port_no = 0; - - switch(addr->sa_family) { - const struct sockaddr_in *inet4_in; - const struct sockaddr_in6 *inet6_in; - - case AF_INET: - inet4_in = (struct sockaddr_in *)addr; - inet_ntop(AF_INET, &inet4_in->sin_addr, url, INET6_ADDRSTRLEN); - port_no = htons(inet4_in->sin_port); - break; - case AF_INET6: - inet6_in = (struct sockaddr_in6 *)addr; - inet_ntop(AF_INET6, &inet6_in->sin6_addr, url, INET6_ADDRSTRLEN); - port_no = htons(inet6_in->sin6_port); - break; - default: - return false; - } - sprintf(port, "%d", port_no); - return true; -} - -/* Helper for getaddrinfo with the same API that retries while getting - * EAI_AGAIN error */ -static int addrgetinfo(const char *node, const char *service, - const struct addrinfo *hints, - struct addrinfo **res) -{ - int ret; - - do { - ret = getaddrinfo(node, service, hints, res); - } while (ret == EAI_AGAIN); - - return ret; -} - - -bool addrinfo_from_url(const char *url, const char *port, struct addrinfo *addrinfo) -{ - struct addrinfo *servinfo, hints; - - memset(&hints, 0, sizeof(struct addrinfo)); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - servinfo = addrinfo; - if (addrgetinfo(url, port, &hints, &servinfo) != 0) - return false; - if (!servinfo) - return false; - memcpy(addrinfo, servinfo->ai_addr, servinfo->ai_addrlen); - freeaddrinfo(servinfo); - return true; -} - -/* Extract a 
resolved url and port from a serverurl string. newurl must be - * a string of at least INET6_ADDRSTRLEN and newport at least 6 bytes. */ -bool url_from_serverurl(char *serverurl, char *newurl, char *newport) -{ - char *url = NULL, *port = NULL; - struct addrinfo addrinfo; - bool ret = false; - - if (!extract_sockaddr(serverurl, &url, &port)) { - LOGWARNING("Failed to extract server address from %s", serverurl); - goto out; - } - if (!addrinfo_from_url(url, port, &addrinfo)) { - LOGWARNING("Failed to extract addrinfo from url %s:%s", url, port); - goto out; - } - if (!url_from_sockaddr((const struct sockaddr *)&addrinfo, newurl, newport)) { - LOGWARNING("Failed to extract url from sockaddr for original url: %s:%s", - url, port); - goto out; - } - ret = true; -out: - dealloc(url); - dealloc(port); - return ret; -} - -/* Convert a socket into a url and port. URL should be a string of - * INET6_ADDRSTRLEN size, port at least a string of 6 bytes */ -bool url_from_socket(const int sockd, char *url, char *port) -{ - struct sockaddr_storage storage; - socklen_t addrlen = sizeof(struct sockaddr_storage); - struct sockaddr *addr = (struct sockaddr *)&storage; - - if (sockd < 1) - return false; - if (getsockname(sockd, addr, &addrlen)) - return false; - if (!url_from_sockaddr(addr, url, port)) - return false; - return true; -} - - -void keep_sockalive(int fd) -{ - const int tcp_one = 1; - const int tcp_keepidle = 45; - const int tcp_keepintvl = 30; - - setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, (const void *)&tcp_one, sizeof(tcp_one)); - setsockopt(fd, SOL_TCP, TCP_NODELAY, (const void *)&tcp_one, sizeof(tcp_one)); - setsockopt(fd, SOL_TCP, TCP_KEEPCNT, &tcp_one, sizeof(tcp_one)); - setsockopt(fd, SOL_TCP, TCP_KEEPIDLE, &tcp_keepidle, sizeof(tcp_keepidle)); - setsockopt(fd, SOL_TCP, TCP_KEEPINTVL, &tcp_keepintvl, sizeof(tcp_keepintvl)); -} - -void nolinger_socket(int fd) -{ - const struct linger so_linger = { 1, 0 }; - - setsockopt(fd, SOL_SOCKET, SO_LINGER, &so_linger, 
sizeof(so_linger)); -} - -void noblock_socket(int fd) -{ - int flags = fcntl(fd, F_GETFL, 0); - - fcntl(fd, F_SETFL, O_NONBLOCK | flags); -} - -void block_socket(int fd) -{ - int flags = fcntl(fd, F_GETFL, 0); - - fcntl(fd, F_SETFL, flags & ~O_NONBLOCK); -} - -void _close(int *fd, const char *file, const char *func, const int line) -{ - int sockd; - - if (*fd < 0) - return; - sockd = *fd; - LOGDEBUG("Closing file handle %d", sockd); - *fd = -1; - if (unlikely(close(sockd))) { - LOGWARNING("Close of fd %d failed with errno %d:%s from %s %s:%d", - sockd, errno, strerror(errno), file, func, line); - } -} - -int bind_socket(char *url, char *port) -{ - struct addrinfo servinfobase, *servinfo, hints, *p; - int ret, sockd = -1; - const int on = 1; - - memset(&hints, 0, sizeof(struct addrinfo)); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - servinfo = &servinfobase; - - if (addrgetinfo(url, port, &hints, &servinfo) != 0) { - LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port); - return sockd; - } - for (p = servinfo; p != NULL; p = p->ai_next) { - sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); - if (sockd > 0) - break; - } - if (sockd < 1 || p == NULL) { - LOGWARNING("Failed to open socket for %s:%s", url, port); - goto out; - } - setsockopt(sockd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); - ret = bind(sockd, p->ai_addr, p->ai_addrlen); - if (ret < 0) { - LOGWARNING("Failed to bind socket for %s:%s", url, port); - Close(sockd); - goto out; - } - -out: - freeaddrinfo(servinfo); - return sockd; -} - -int connect_socket(char *url, char *port) -{ - struct addrinfo servinfobase, *servinfo, hints, *p; - int sockd = -1; - - memset(&hints, 0, sizeof(struct addrinfo)); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - memset(&servinfobase, 0, sizeof(struct addrinfo)); - servinfo = &servinfobase; - - if (addrgetinfo(url, port, &hints, &servinfo) != 0) { - LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port); 
- goto out; - } - - for (p = servinfo; p != NULL; p = p->ai_next) { - sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); - if (sockd == -1) { - LOGDEBUG("Failed socket"); - continue; - } - - /* Iterate non blocking over entries returned by getaddrinfo - * to cope with round robin DNS entries, finding the first one - * we can connect to quickly. */ - noblock_socket(sockd); - if (connect(sockd, p->ai_addr, p->ai_addrlen) == -1) { - int selret; - - if (!sock_connecting()) { - Close(sockd); - LOGDEBUG("Failed sock connect"); - continue; - } - selret = wait_write_select(sockd, 5); - if (selret > 0) { - socklen_t len; - int err, n; - - len = sizeof(err); - n = getsockopt(sockd, SOL_SOCKET, SO_ERROR, (void *)&err, &len); - if (!n && !err) { - LOGDEBUG("Succeeded delayed connect"); - block_socket(sockd); - break; - } - } - Close(sockd); - LOGDEBUG("Select timeout/failed connect"); - continue; - } - LOGDEBUG("Succeeded immediate connect"); - if (sockd >= 0) - block_socket(sockd); - - break; - } - if (p == NULL) { - LOGINFO("Failed to connect to %s:%s", url, port); - sockd = -1; - } - freeaddrinfo(servinfo); -out: - return sockd; -} - -/* Measure the minimum round trip time it should take to get to a url by attempting - * to connect to what should be a closed socket on port 1042. This is a blocking - * function so can take many seconds. 
Returns 0 on failure */ -int round_trip(char *url) -{ - struct addrinfo servinfobase, *p, hints; - int sockd = -1, ret = 0, i, diff; - tv_t start_tv, end_tv; - char port[] = "1042"; - - memset(&hints, 0, sizeof(struct addrinfo)); - hints.ai_family = AF_UNSPEC; - hints.ai_socktype = SOCK_STREAM; - memset(&servinfobase, 0, sizeof(struct addrinfo)); - p = &servinfobase; - - if (addrgetinfo(url, port, &hints, &p) != 0) { - LOGWARNING("Failed to resolve (?wrong URL) %s:%s", url, port); - return ret; - } - /* This function should be called only on already-resolved IP addresses so - * we only need to use the first result from servinfobase */ - sockd = socket(p->ai_family, p->ai_socktype, p->ai_protocol); - if (sockd == -1) { - LOGERR("Failed socket"); - goto out; - } - /* Attempt to connect 5 times to what should be a closed port and measure - * the time it takes to get a refused message */ - for (i = 0; i < 5; i++) { - tv_time(&start_tv); - if (!connect(sockd, p->ai_addr, p->ai_addrlen) || errno != ECONNREFUSED) { - LOGINFO("Unable to get round trip due to %s:%s connect not being refused", - url, port); - goto out; - } - tv_time(&end_tv); - diff = ms_tvdiff(&end_tv, &start_tv); - if (!ret || diff < ret) - ret = diff; - } - if (ret > 500) { - LOGINFO("Round trip to %s:%s greater than 500ms at %d, clamping to 500", - url, port, diff); - diff = 500; - } - LOGINFO("Minimum round trip to %s:%s calculated as %dms", url, port, ret); -out: - Close(sockd); - freeaddrinfo(p); - return ret; -} - -int write_socket(int fd, const void *buf, size_t nbyte) -{ - int ret; - - ret = wait_write_select(fd, 5); - if (ret < 1) { - if (!ret) - LOGNOTICE("Select timed out in write_socket"); - else - LOGNOTICE("Select failed in write_socket"); - goto out; - } - ret = write_length(fd, buf, nbyte); - if (ret < 0) - LOGNOTICE("Failed to write in write_socket"); -out: - return ret; -} - -void empty_socket(int fd) -{ - char buf[PAGESIZE]; - int ret; - - if (fd < 1) - return; - - do { - ret = recv(fd, 
buf, PAGESIZE - 1, MSG_DONTWAIT); - if (ret > 0) { - buf[ret] = 0; - LOGDEBUG("Discarding: %s", buf); - } - } while (ret > 0); -} - -void _close_unix_socket(int *sockd, const char *server_path) -{ - LOGDEBUG("Closing unix socket %d %s", *sockd, server_path); - _Close(sockd); -} - -int _open_unix_server(const char *server_path, const char *file, const char *func, const int line) -{ - mode_t mode = S_IRWXU | S_IRWXG; // Owner+Group RWX - struct sockaddr_un serveraddr; - int sockd = -1, len, ret; - struct stat buf; - - if (likely(server_path)) { - len = strlen(server_path); - if (unlikely(len < 1 || len >= UNIX_PATH_MAX)) { - LOGERR("Invalid server path length %d in open_unix_server", len); - goto out; - } - } else { - LOGERR("Null passed as server_path to open_unix_server"); - goto out; - } - - if (!stat(server_path, &buf)) { - if ((buf.st_mode & S_IFMT) == S_IFSOCK) { - ret = unlink(server_path); - if (ret) { - LOGERR("Unlink of %s failed in open_unix_server", server_path); - goto out; - } - LOGDEBUG("Unlinked %s to recreate socket", server_path); - } else { - LOGWARNING("%s already exists and is not a socket, not removing", - server_path); - goto out; - } - } - - sockd = socket(AF_UNIX, SOCK_STREAM, 0); - if (unlikely(sockd < 0)) { - LOGERR("Failed to open socket in open_unix_server"); - goto out; - } - memset(&serveraddr, 0, sizeof(serveraddr)); - serveraddr.sun_family = AF_UNIX; - strcpy(serveraddr.sun_path, server_path); - - ret = bind(sockd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)); - if (unlikely(ret < 0)) { - LOGERR("Failed to bind to socket in open_unix_server"); - close_unix_socket(sockd, server_path); - sockd = -1; - goto out; - } - - ret = chmod(server_path, mode); - if (unlikely(ret < 0)) - LOGERR("Failed to set mode in open_unix_server - continuing"); - - ret = listen(sockd, SOMAXCONN); - if (unlikely(ret < 0)) { - LOGERR("Failed to listen to socket in open_unix_server"); - close_unix_socket(sockd, server_path); - sockd = -1; - goto out; - } 
- - LOGDEBUG("Opened server path %s successfully on socket %d", server_path, sockd); -out: - if (unlikely(sockd == -1)) - LOGERR("Failure in open_unix_server from %s %s:%d", file, func, line); - return sockd; -} - -int _open_unix_client(const char *server_path, const char *file, const char *func, const int line) -{ - struct sockaddr_un serveraddr; - int sockd = -1, len, ret; - - if (likely(server_path)) { - len = strlen(server_path); - if (unlikely(len < 1 || len >= UNIX_PATH_MAX)) { - LOGERR("Invalid server path length %d in open_unix_client", len); - goto out; - } - } else { - LOGERR("Null passed as server_path to open_unix_client"); - goto out; - } - - sockd = socket(AF_UNIX, SOCK_STREAM, 0); - if (unlikely(sockd < 0)) { - LOGERR("Failed to open socket in open_unix_client"); - goto out; - } - memset(&serveraddr, 0, sizeof(serveraddr)); - serveraddr.sun_family = AF_UNIX; - strcpy(serveraddr.sun_path, server_path); - - ret = connect(sockd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)); - if (unlikely(ret < 0)) { - LOGERR("Failed to bind to socket in open_unix_client"); - Close(sockd); - goto out; - } - - LOGDEBUG("Opened client path %s successfully on socket %d", server_path, sockd); -out: - if (unlikely(sockd == -1)) - LOGERR("Failure in open_unix_client from %s %s:%d", file, func, line); - return sockd; -} - -/* Wait till a socket has been closed at the other end */ -int wait_close(int sockd, int timeout) -{ - struct pollfd sfd; - int ret; - - if (unlikely(sockd < 0)) - return -1; - sfd.fd = sockd; - sfd.events = POLLRDHUP; - sfd.revents = 0; - timeout *= 1000; - ret = poll(&sfd, 1, timeout); - if (ret < 1) - return 0; - return sfd.revents & (POLLHUP | POLLRDHUP | POLLERR); -} - -/* Emulate a select read wait for high fds that select doesn't support. 
*/ -int wait_read_select(int sockd, float timeout) -{ - struct epoll_event event = {0, {NULL}}; - int epfd, ret; - - epfd = epoll_create1(EPOLL_CLOEXEC); - event.events = EPOLLIN | EPOLLRDHUP; - epoll_ctl(epfd, EPOLL_CTL_ADD, sockd, &event); - timeout *= 1000; - ret = epoll_wait(epfd, &event, 1, timeout); - close(epfd); - return ret; -} - -int read_length(int sockd, void *buf, int len) -{ - int ret, ofs = 0; - - if (unlikely(len < 1)) { - LOGWARNING("Invalid read length of %d requested in read_length", len); - return -1; - } - if (unlikely(sockd < 0)) - return -1; - while (len) { - ret = recv(sockd, buf + ofs, len, MSG_WAITALL); - if (unlikely(ret < 1)) - return -1; - ofs += ret; - len -= ret; - } - return ofs; -} - -/* Use a standard message across the unix sockets: - * 4 byte length of message as little endian encoded uint32_t followed by the - * string. Return NULL in case of failure. */ -char *_recv_unix_msg(int sockd, int timeout1, int timeout2, const char *file, const char *func, const int line) -{ - char *buf = NULL; - uint32_t msglen; - int ret, ern; - - ret = wait_read_select(sockd, timeout1); - if (unlikely(ret < 1)) { - ern = errno; - LOGERR("Select1 failed in recv_unix_msg (%d)", ern); - goto out; - } - /* Get message length */ - ret = read_length(sockd, &msglen, 4); - if (unlikely(ret < 4)) { - ern = errno; - LOGERR("Failed to read 4 byte length in recv_unix_msg (%d?)", ern); - goto out; - } - msglen = le32toh(msglen); - if (unlikely(msglen < 1 || msglen > 0x80000000)) { - LOGWARNING("Invalid message length %u sent to recv_unix_msg", msglen); - goto out; - } - ret = wait_read_select(sockd, timeout2); - if (unlikely(ret < 1)) { - ern = errno; - LOGERR("Select2 failed in recv_unix_msg (%d)", ern); - goto out; - } - buf = ckzalloc(msglen + 1); - ret = read_length(sockd, buf, msglen); - if (unlikely(ret < (int)msglen)) { - ern = errno; - LOGERR("Failed to read %u bytes in recv_unix_msg (%d?)", msglen, ern); - dealloc(buf); - } -out: - shutdown(sockd, 
SHUT_RD); - if (unlikely(!buf)) - LOGERR("Failure in recv_unix_msg from %s %s:%d", file, func, line); - return buf; -} - -/* Emulate a select write wait for high fds that select doesn't support */ -int wait_write_select(int sockd, float timeout) -{ - struct epoll_event event = {0, {NULL}}; - int epfd, ret; - - epfd = epoll_create1(EPOLL_CLOEXEC); - event.events = EPOLLOUT | EPOLLRDHUP ; - epoll_ctl(epfd, EPOLL_CTL_ADD, sockd, &event); - timeout *= 1000; - ret = epoll_wait(epfd, &event, 1, timeout); - close(epfd); - return ret; -} - -int _write_length(int sockd, const void *buf, int len, const char *file, const char *func, const int line) -{ - int ret, ofs = 0, ern; - - if (unlikely(len < 1)) { - LOGWARNING("Invalid write length of %d requested in write_length from %s %s:%d", - len, file, func, line); - return -1; - } - if (unlikely(sockd < 0)) { - ern = errno; - LOGWARNING("Attempt to write to invalidated sock in write_length from %s %s:%d", - file, func, line); - return -1; - } - while (len) { - ret = write(sockd, buf + ofs, len); - if (unlikely(ret < 0)) { - ern = errno; - LOGERR("Failed to write %d bytes in write_length (%d) from %s %s:%d", - len, ern, file, func, line); - return -1; - } - ofs += ret; - len -= ret; - } - return ofs; -} - -bool _send_unix_msg(int sockd, const char *buf, int timeout, const char *file, const char *func, const int line) -{ - uint32_t msglen, len; - bool retval = false; - int ret, ern; - - if (unlikely(sockd < 0)) { - LOGWARNING("Attempting to send unix message to invalidated sockd %d", sockd); - goto out; - } - if (unlikely(!buf)) { - LOGWARNING("Null message sent to send_unix_msg"); - goto out; - } - len = strlen(buf); - if (unlikely(!len)) { - LOGWARNING("Zero length message sent to send_unix_msg"); - goto out; - } - msglen = htole32(len); - ret = wait_write_select(sockd, timeout); - if (unlikely(ret < 1)) { - ern = errno; - LOGERR("Select1 failed in send_unix_msg (%d)", ern); - goto out; - } - ret = _write_length(sockd, &msglen, 
4, file, func, line); - if (unlikely(ret < 4)) { - LOGERR("Failed to write 4 byte length in send_unix_msg"); - goto out; - } - ret = wait_write_select(sockd, timeout); - if (unlikely(ret < 1)) { - ern = errno; - LOGERR("Select2 failed in send_unix_msg (%d)", ern); - goto out; - } - ret = _write_length(sockd, buf, len, file, func, line); - if (unlikely(ret < 0)) { - LOGERR("Failed to write %d bytes in send_unix_msg", len); - goto out; - } - retval = true; -out: - shutdown(sockd, SHUT_WR); - if (unlikely(!retval)) - LOGERR("Failure in send_unix_msg from %s %s:%d", file, func, line); - return retval; -} - -bool _send_unix_data(int sockd, const struct msghdr *msg, const char *file, const char *func, const int line) -{ - bool retval = false; - int ret; - - if (unlikely(!msg)) { - LOGWARNING("Null message sent to send_unix_data"); - goto out; - } - ret = wait_write_select(sockd, UNIX_WRITE_TIMEOUT); - if (unlikely(ret < 1)) { - LOGERR("Select1 failed in send_unix_data"); - goto out; - } - ret = sendmsg(sockd, msg, 0); - if (unlikely(ret < 1)) { - LOGERR("Failed to send in send_unix_data"); - goto out; - } - retval = true; -out: - shutdown(sockd, SHUT_WR); - if (unlikely(!retval)) - LOGERR("Failure in send_unix_data from %s %s:%d", file, func, line); - return retval; -} - -bool _recv_unix_data(int sockd, struct msghdr *msg, const char *file, const char *func, const int line) -{ - bool retval = false; - int ret; - - ret = wait_read_select(sockd, UNIX_READ_TIMEOUT); - if (unlikely(ret < 1)) { - LOGERR("Select1 failed in recv_unix_data"); - goto out; - } - ret = recvmsg(sockd, msg, MSG_WAITALL); - if (unlikely(ret < 0)) { - LOGERR("Failed to recv in recv_unix_data"); - goto out; - } - retval = true; -out: - shutdown(sockd, SHUT_RD); - if (unlikely(!retval)) - LOGERR("Failure in recv_unix_data from %s %s:%d", file, func, line); - return retval; -} - -#define CONTROLLLEN CMSG_LEN(sizeof(int)) -#define MAXLINE 4096 - -/* Send a msghdr containing fd via the unix socket sockd */ 
-bool _send_fd(int fd, int sockd, const char *file, const char *func, const int line) -{ - struct cmsghdr *cmptr = ckzalloc(CONTROLLLEN); - struct iovec iov[1]; - struct msghdr msg; - char buf[2]; - bool ret; - int *cm; - - memset(&msg, 0, sizeof(struct msghdr)); - iov[0].iov_base = buf; - iov[0].iov_len = 2; - msg.msg_iov = iov; - msg.msg_iovlen = 1; - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_controllen = CONTROLLLEN; - msg.msg_control = cmptr; - cmptr->cmsg_level = SOL_SOCKET; - cmptr->cmsg_type = SCM_RIGHTS; - cmptr->cmsg_len = CONTROLLLEN; - cm = (int *)CMSG_DATA(cmptr); - *cm = fd; - buf[1] = 0; - buf[0] = 0; - ret = send_unix_data(sockd, &msg); - free(cmptr); - if (!ret) - LOGERR("Failed to send_unix_data in send_fd from %s %s:%d", file, func, line); - return ret; -} - -/* Receive an fd by reading a msghdr from the unix socket sockd */ -int _get_fd(int sockd, const char *file, const char *func, const int line) -{ - int newfd = -1; - char buf[MAXLINE]; - struct iovec iov[1]; - struct msghdr msg; - struct cmsghdr *cmptr = ckzalloc(CONTROLLLEN); - int *cm; - - memset(&msg, 0, sizeof(struct msghdr)); - iov[0].iov_base = buf; - iov[0].iov_len = sizeof(buf); - msg.msg_iov = iov; - msg.msg_name = NULL; - msg.msg_namelen = 0; - msg.msg_control = cmptr; - msg.msg_controllen = CONTROLLLEN; - if (!recv_unix_data(sockd, &msg)) { - LOGERR("Failed to recv_unix_data in get_fd from %s %s:%d", file, func, line); - goto out; - } -out: - cm = (int *)CMSG_DATA(cmptr); - newfd = *cm; - free(cmptr); - return newfd; -} - - -void _json_check(json_t *val, json_error_t *err, const char *file, const char *func, const int line) -{ - if (likely(val)) - return; - - LOGERR("Invalid json line:%d col:%d pos:%d text: %s from %s %s:%d", - err->line, err->column, err->position, err->text, - file, func, line); -} - -/* Extracts a string value from a json array with error checking. To be used - * when the value of the string returned is only examined and not to be stored. 
- * See json_array_string below */ -const char *__json_array_string(json_t *val, unsigned int entry) -{ - json_t *arr_entry; - - if (json_is_null(val)) - return NULL; - if (!json_is_array(val)) - return NULL; - if (entry > json_array_size(val)) - return NULL; - arr_entry = json_array_get(val, entry); - if (!json_is_string(arr_entry)) - return NULL; - - return json_string_value(arr_entry); -} - -/* Creates a freshly malloced dup of __json_array_string */ -char *json_array_string(json_t *val, unsigned int entry) -{ - const char *buf = __json_array_string(val, entry); - - if (buf) - return strdup(buf); - return NULL; -} - -json_t *json_object_dup(json_t *val, const char *entry) -{ - return json_copy(json_object_get(val, entry)); -} - -char *rotating_filename(const char *path, time_t when) -{ - char *filename; - struct tm tm; - - gmtime_r(&when, &tm); - ASPRINTF(&filename, "%s%04d%02d%02d%02d.log", path, tm.tm_year + 1900, tm.tm_mon + 1, - tm.tm_mday, tm.tm_hour); - return filename; -} - -/* Creates a logfile entry which changes filename hourly with exclusive access */ -bool rotating_log(const char *path, const char *msg) -{ - mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; - char *filename; - FILE *fp; - int fd; - bool ok = false; - - filename = rotating_filename(path, time(NULL)); - fd = open(filename, O_CREAT | O_RDWR | O_CLOEXEC , mode); - if (unlikely(fd == -1)) { - LOGERR("Failed to open %s in rotating_log!", filename); - goto stageleft; - } - fp = fdopen(fd, "ae"); - if (unlikely(!fp)) { - Close(fd); - LOGERR("Failed to fdopen %s in rotating_log!", filename); - goto stageleft; - } - if (unlikely(flock(fd, LOCK_EX))) { - fclose(fp); - LOGERR("Failed to flock %s in rotating_log!", filename); - goto stageleft; - } - fprintf(fp, "%s\n", msg); - fclose(fp); - ok = true; - -stageleft: - free(filename); - - return ok; -} - -/* Align a size_t to 4 byte boundaries for fussy arches */ -void align_len(size_t *len) -{ - if (*len % 4) - *len += 4 - (*len % 4); -} - -/* 
Malloc failure should be fatal but keep backing off and retrying as the OS - * will kill us eventually if it can't recover. */ -void realloc_strcat(char **ptr, const char *s) -{ - size_t old, new, len; - int backoff = 1; - void *new_ptr; - char *ofs; - - if (unlikely(!*s)) { - LOGWARNING("Passed empty pointer to realloc_strcat"); - return; - } - new = strlen(s); - if (unlikely(!new)) { - LOGWARNING("Passed empty string to realloc_strcat"); - return; - } - if (!*ptr) - old = 0; - else - old = strlen(*ptr); - len = old + new + 1; - len = round_up_page(len); - while (42) { - new_ptr = realloc(*ptr, len); - if (likely(new_ptr)) - break; - if (backoff == 1) - fprintf(stderr, "Failed to realloc %d, retrying\n", (int)len); - cksleep_ms(backoff); - backoff <<= 1; - } - *ptr = new_ptr; - ofs = *ptr + old; - sprintf(ofs, "%s", s); -} - -void trail_slash(char **buf) -{ - int ofs; - - ofs = strlen(*buf) - 1; - if (memcmp(*buf + ofs, "/", 1)) - realloc_strcat(buf, "/"); -} - -void *_ckalloc(size_t len, const char *file, const char *func, const int line) -{ - int backoff = 1; - void *ptr; - - align_len(&len); - while (42) { - ptr = malloc(len); - if (likely(ptr)) - break; - if (backoff == 1) { - fprintf(stderr, "Failed to ckalloc %d, retrying from %s %s:%d\n", - (int)len, file, func, line); - } - cksleep_ms(backoff); - backoff <<= 1; - } - return ptr; -} - -void *json_ckalloc(size_t size) -{ - return _ckalloc(size, __FILE__, __func__, __LINE__); -} - -void *_ckzalloc(size_t len, const char *file, const char *func, const int line) -{ - int backoff = 1; - void *ptr; - - align_len(&len); - while (42) { - ptr = calloc(len, 1); - if (likely(ptr)) - break; - if (backoff == 1) { - fprintf(stderr, "Failed to ckzalloc %d, retrying from %s %s:%d\n", - (int)len, file, func, line); - } - cksleep_ms(backoff); - backoff <<= 1; - } - return ptr; -} - -/* Round up to the nearest page size for efficient malloc */ -size_t round_up_page(size_t len) -{ - int rem = len % PAGESIZE; - - if (rem) - len 
+= PAGESIZE - rem; - return len; -} - - - -/* Adequate size s==len*2 + 1 must be alloced to use this variant */ -void __bin2hex(void *vs, const void *vp, size_t len) -{ - static const char hex[16] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'}; - const uchar *p = vp; - uchar *s = vs; - int i; - - for (i = 0; i < (int)len; i++) { - *s++ = hex[p[i] >> 4]; - *s++ = hex[p[i] & 0xF]; - } - *s++ = '\0'; -} - -/* Returns a malloced array string of a binary value of arbitrary length. The - * array is rounded up to a 4 byte size to appease architectures that need - * aligned array sizes */ -void *bin2hex(const void *vp, size_t len) -{ - const uchar *p = vp; - size_t slen; - uchar *s; - - slen = len * 2 + 1; - s = ckzalloc(slen); - __bin2hex(s, p, len); - - return s; -} - -const int hex2bin_tbl[256] = { - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, - -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -}; - -bool _validhex(const char *buf, const char *file, const char *func, const int line) -{ - unsigned int i, 
slen; - bool ret = false; - - slen = strlen(buf); - if (!slen || slen % 2) { - LOGDEBUG("Invalid hex due to length %u from %s %s:%d", slen, file, func, line); - goto out; - } - for (i = 0; i < slen; i++) { - uchar idx = buf[i]; - - if (hex2bin_tbl[idx] == -1) { - LOGDEBUG("Invalid hex due to value %u at offset %d from %s %s:%d", - idx, i, file, func, line); - goto out; - } - } - ret = true; -out: - return ret; -} - -/* Does the reverse of bin2hex but does not allocate any ram */ -bool _hex2bin(void *vp, const void *vhexstr, size_t len, const char *file, const char *func, const int line) -{ - const uchar *hexstr = vhexstr; - int nibble1, nibble2; - bool ret = false; - uchar *p = vp; - uchar idx; - - while (*hexstr && len) { - if (unlikely(!hexstr[1])) { - LOGWARNING("Early end of string in hex2bin from %s %s:%d", file, func, line); - return ret; - } - - idx = *hexstr++; - nibble1 = hex2bin_tbl[idx]; - idx = *hexstr++; - nibble2 = hex2bin_tbl[idx]; - - if (unlikely((nibble1 < 0) || (nibble2 < 0))) { - LOGWARNING("Invalid binary encoding in hex2bin from %s %s:%d", file, func, line); - return ret; - } - - *p++ = (((uchar)nibble1) << 4) | ((uchar)nibble2); - --len; - } - - if (likely(len == 0 && *hexstr == 0)) - ret = true; - if (!ret) - LOGWARNING("Failed hex2bin decode from %s %s:%d", file, func, line); - return ret; -} - -static const int b58tobin_tbl[] = { - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, -1, -1, -1, -1, -1, -1, - -1, 9, 10, 11, 12, 13, 14, 15, 16, -1, 17, 18, 19, 20, 21, -1, - 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, -1, -1, -1, -1, -1, - -1, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, -1, 44, 45, 46, - 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57 -}; - -/* b58bin should always be at least 25 bytes long and already checked to be - * valid. 
*/ -void b58tobin(char *b58bin, const char *b58) -{ - uint32_t c, bin32[7]; - int len, i, j; - uint64_t t; - - memset(bin32, 0, 7 * sizeof(uint32_t)); - len = strlen((const char *)b58); - for (i = 0; i < len; i++) { - c = b58[i]; - c = b58tobin_tbl[c]; - for (j = 6; j >= 0; j--) { - t = ((uint64_t)bin32[j]) * 58 + c; - c = (t & 0x3f00000000ull) >> 32; - bin32[j] = t & 0xffffffffull; - } - } - *(b58bin++) = bin32[0] & 0xff; - for (i = 1; i < 7; i++) { - *((uint32_t *)b58bin) = htobe32(bin32[i]); - b58bin += sizeof(uint32_t); - } -} - -/* Does a safe string comparison tolerating zero length and NULL strings */ -int safecmp(const char *a, const char *b) -{ - int lena, lenb; - - if (unlikely(!a || !b)) { - if (a != b) - return -1; - return 0; - } - lena = strlen(a); - lenb = strlen(b); - if (unlikely(!lena || !lenb)) { - if (lena != lenb) - return -1; - return 0; - } - return (strcmp(a, b)); -} - -/* Returns whether there is a case insensitive match of buf to cmd, safely - * handling NULL or zero length strings. 
/* Returns whether there is a case insensitive match of buf to cmd, safely
 * handling NULL or zero length strings. */
bool cmdmatch(const char *buf, const char *cmd)
{
	size_t blen, clen;

	if (!buf || !*buf)
		return false;
	blen = strlen(buf);
	clen = strlen(cmd);
	if (blen < clen)
		return false;
	return !strncasecmp(buf, cmd, clen);
}


static const char base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/* Return a malloced string of *src encoded into mime base 64 */
char *http_base64(const char *src)
{
	size_t remaining, outlen;
	char *out, *pos;
	int triple;

	remaining = strlen(src);
	/* Four output chars per 3 input bytes, rounded up, plus the NUL */
	outlen = ((remaining + 2) / 3) * 4 + 1;
	out = ckalloc(outlen);
	pos = out;

	/* Encode every complete 3 byte group */
	while (remaining >= 3) {
		triple = (src[0] << 16) | (src[1] << 8) | src[2];
		pos[0] = base64[(triple >> 18) & 0x3f];
		pos[1] = base64[(triple >> 12) & 0x3f];
		pos[2] = base64[(triple >> 6) & 0x3f];
		pos[3] = base64[triple & 0x3f];
		src += 3;
		remaining -= 3;
		pos += 4;
	}

	/* Pad a trailing 1 or 2 byte group with '=' */
	switch (remaining) {
		case 2:
			triple = (src[0] << 16) | (src[1] << 8);
			pos[0] = base64[(triple >> 18) & 0x3f];
			pos[1] = base64[(triple >> 12) & 0x3f];
			pos[2] = base64[(triple >> 6) & 0x3f];
			pos[3] = '=';
			pos += 4;
			break;
		case 1:
			triple = src[0] << 16;
			pos[0] = base64[(triple >> 18) & 0x3f];
			pos[1] = base64[(triple >> 12) & 0x3f];
			pos[2] = pos[3] = '=';
			pos += 4;
			break;
	}
	*pos = 0;
	return out;
}

/* Maps a bech32 alphabet character to its 5 bit value, -1 for invalid */
static const int8_t charset_rev[128] = {
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
	15, -1, 10, 17, 21, 20, 26, 30,  7,  5, -1, -1, -1, -1, -1, -1,
	-1, 29, -1, 24, 13, 25,  9,  8, 23, -1, 18, 22, 31, 27, 19, -1,
	 1,  0,  3, 16, 11, 28, 12, 14,  6,  4,  2, -1, -1, -1, -1, -1,
	-1, 29, -1, 24, 13, 25,  9,  8, 23, -1, 18, 22, 31, 27, 19, -1,
	 1,  0,  3, 16, 11, 28, 12, 14,  6,  4,  2, -1, -1, -1, -1, -1
};
have been checked beforehand. */ -static void bech32_decode(uint8_t *data, int *data_len, const char *input) -{ - int input_len = strlen(input), hrp_len, i; - - *data_len = 0; - while (*data_len < input_len && input[(input_len - 1) - *data_len] != '1') - ++(*data_len); - hrp_len = input_len - (1 + *data_len); - *(data_len) -= 6; - for (i = hrp_len + 1; i < input_len; i++) { - int v = (input[i] & 0x80) ? -1 : charset_rev[(int)input[i]]; - - if (i + 6 < input_len) - data[i - (1 + hrp_len)] = v; - } -} - -static void convert_bits(char *out, int *outlen, const uint8_t *in, - int inlen) -{ - const int outbits = 8, inbits = 5; - uint32_t val = 0, maxv = (((uint32_t)1) << outbits) - 1; - int bits = 0; - - while (inlen--) { - val = (val << inbits) | *(in++); - bits += inbits; - while (bits >= outbits) { - bits -= outbits; - out[(*outlen)++] = (val >> bits) & maxv; - } - } -} - -static int address_to_pubkeytxn(char *pkh, const char *addr) -{ - char b58bin[25] = {}; - - b58tobin(b58bin, addr); - pkh[0] = 0x76; - pkh[1] = 0xa9; - pkh[2] = 0x14; - memcpy(&pkh[3], &b58bin[1], 20); - pkh[23] = 0x88; - pkh[24] = 0xac; - return 25; -} - -static int address_to_scripttxn(char *psh, const char *addr) -{ - char b58bin[25] = {}; - - b58tobin(b58bin, addr); - psh[0] = 0xa9; - psh[1] = 0x14; - memcpy(&psh[2], &b58bin[1], 20); - psh[22] = 0x87; - return 23; -} - -static int segaddress_to_txn(char *p2h, const char *addr) -{ - int data_len, witdata_len = 0; - char *witdata = &p2h[2]; - uint8_t data[84]; - - bech32_decode(data, &data_len, addr); - p2h[0] = data[0]; - /* Witness version is > 0 */ - if (p2h[0]) - p2h[0] += 0x50; - convert_bits(witdata, &witdata_len, data + 1, data_len - 1); - p2h[1] = witdata_len; - return witdata_len + 2; -} - -/* Convert an address to a transaction and return the length of the transaction */ -int address_to_txn(char *p2h, const char *addr, const bool script, const bool segwit) -{ - if (segwit) - return segaddress_to_txn(p2h, addr); - if (script) - return 
address_to_scripttxn(p2h, addr); - return address_to_pubkeytxn(p2h, addr); -} - -/* For encoding nHeight into coinbase, return how many bytes were used */ -int ser_number(uchar *s, int32_t val) -{ - int32_t *i32 = (int32_t *)&s[1]; - int len; - - if (val < 0x80) - len = 1; - else if (val < 0x8000) - len = 2; - else if (val < 0x800000) - len = 3; - else - len = 4; - *i32 = htole32(val); - s[0] = len++; - return len; -} - -int get_sernumber(uchar *s) -{ - int32_t val = 0; - int len; - - len = s[0]; - if (unlikely(len < 1 || len > 4)) - return 0; - memcpy(&val, &s[1], len); - return le32toh(val); -} - -/* For testing a le encoded 256 byte hash against a target */ -bool fulltest(const uchar *hash, const uchar *target) -{ - uint32_t *hash32 = (uint32_t *)hash; - uint32_t *target32 = (uint32_t *)target; - bool ret = true; - int i; - - for (i = 28 / 4; i >= 0; i--) { - uint32_t h32tmp = le32toh(hash32[i]); - uint32_t t32tmp = le32toh(target32[i]); - - if (h32tmp > t32tmp) { - ret = false; - break; - } - if (h32tmp < t32tmp) { - ret = true; - break; - } - } - return ret; -} - -void copy_tv(tv_t *dest, const tv_t *src) -{ - memcpy(dest, src, sizeof(tv_t)); -} - -void ts_to_tv(tv_t *val, const ts_t *spec) -{ - val->tv_sec = spec->tv_sec; - val->tv_usec = spec->tv_nsec / 1000; -} - -void tv_to_ts(ts_t *spec, const tv_t *val) -{ - spec->tv_sec = val->tv_sec; - spec->tv_nsec = val->tv_usec * 1000; -} - -void us_to_tv(tv_t *val, int64_t us) -{ - lldiv_t tvdiv = lldiv(us, 1000000); - - val->tv_sec = tvdiv.quot; - val->tv_usec = tvdiv.rem; -} - -void us_to_ts(ts_t *spec, int64_t us) -{ - lldiv_t tvdiv = lldiv(us, 1000000); - - spec->tv_sec = tvdiv.quot; - spec->tv_nsec = tvdiv.rem * 1000; -} - -void ms_to_ts(ts_t *spec, int64_t ms) -{ - lldiv_t tvdiv = lldiv(ms, 1000); - - spec->tv_sec = tvdiv.quot; - spec->tv_nsec = tvdiv.rem * 1000000; -} - -void ms_to_tv(tv_t *val, int64_t ms) -{ - lldiv_t tvdiv = lldiv(ms, 1000); - - val->tv_sec = tvdiv.quot; - val->tv_usec = tvdiv.rem * 1000; 
-} - -void tv_time(tv_t *tv) -{ - gettimeofday(tv, NULL); -} - -void ts_realtime(ts_t *ts) -{ - clock_gettime(CLOCK_REALTIME, ts); -} - -void cksleep_prepare_r(ts_t *ts) -{ - clock_gettime(CLOCK_MONOTONIC, ts); -} - -void nanosleep_abstime(ts_t *ts_end) -{ - clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, ts_end, NULL); -} - -void timeraddspec(ts_t *a, const ts_t *b) -{ - a->tv_sec += b->tv_sec; - a->tv_nsec += b->tv_nsec; - if (a->tv_nsec >= 1000000000) { - a->tv_nsec -= 1000000000; - a->tv_sec++; - } -} - -/* Reentrant version of cksleep functions allow start time to be set separately - * from the beginning of the actual sleep, allowing scheduling delays to be - * counted in the sleep. */ -void cksleep_ms_r(ts_t *ts_start, int ms) -{ - ts_t ts_end; - - ms_to_ts(&ts_end, ms); - timeraddspec(&ts_end, ts_start); - nanosleep_abstime(&ts_end); -} - -void cksleep_us_r(ts_t *ts_start, int64_t us) -{ - ts_t ts_end; - - us_to_ts(&ts_end, us); - timeraddspec(&ts_end, ts_start); - nanosleep_abstime(&ts_end); -} - -void cksleep_ms(int ms) -{ - ts_t ts_start; - - cksleep_prepare_r(&ts_start); - cksleep_ms_r(&ts_start, ms); -} - -void cksleep_us(int64_t us) -{ - ts_t ts_start; - - cksleep_prepare_r(&ts_start); - cksleep_us_r(&ts_start, us); -} - -/* Returns the microseconds difference between end and start times as a double */ -double us_tvdiff(tv_t *end, tv_t *start) -{ - /* Sanity check. We should only be using this for small differences so - * limit the max to 60 seconds. */ - if (unlikely(end->tv_sec - start->tv_sec > 60)) - return 60000000; - return (end->tv_sec - start->tv_sec) * 1000000 + (end->tv_usec - start->tv_usec); -} - -/* Returns the milliseconds difference between end and start times */ -int ms_tvdiff(tv_t *end, tv_t *start) -{ - /* Like us_tdiff, limit to 1 hour. 
*/ - if (unlikely(end->tv_sec - start->tv_sec > 3600)) - return 3600000; - return (end->tv_sec - start->tv_sec) * 1000 + (end->tv_usec - start->tv_usec) / 1000; -} - -/* Returns the seconds difference between end and start times as a double */ -double tvdiff(tv_t *end, tv_t *start) -{ - return end->tv_sec - start->tv_sec + (end->tv_usec - start->tv_usec) / 1000000.0; -} - -/* Create an exponentially decaying average over interval */ -void decay_time(double *f, double fadd, double fsecs, double interval) -{ - double ftotal, fprop, dexp; - - if (fsecs <= 0) - return; - dexp = fsecs / interval; - /* Put Sanity bound on how large the denominator can get */ - if (unlikely(dexp > 36)) - dexp = 36; - fprop = 1.0 - 1 / exp(dexp); - ftotal = 1.0 + fprop; - *f += (fadd / fsecs * fprop); - *f /= ftotal; - /* Sanity check to prevent meaningless super small numbers that - * eventually underflow libjansson's real number interpretation. */ - if (unlikely(*f < 2E-16)) - *f = 0; -} - -/* Sanity check to prevent clock adjustments backwards from screwing up stats */ -double sane_tdiff(tv_t *end, tv_t *start) -{ - double tdiff = tvdiff(end, start); - - if (unlikely(tdiff < 0.001)) - tdiff = 0.001; - return tdiff; -} - -/* Convert a double value into a truncated string for displaying with its - * associated suitable for Mega, Giga etc. 
Buf array needs to be long enough */ -void suffix_string(double val, char *buf, size_t bufsiz, int sigdigits) -{ - const double kilo = 1000; - const double mega = 1000000; - const double giga = 1000000000; - const double tera = 1000000000000; - const double peta = 1000000000000000; - const double exa = 1000000000000000000; - char suffix[2] = ""; - bool decimal = true; - double dval; - - if (val >= exa) { - val /= peta; - dval = val / kilo; - strcpy(suffix, "E"); - } else if (val >= peta) { - val /= tera; - dval = val / kilo; - strcpy(suffix, "P"); - } else if (val >= tera) { - val /= giga; - dval = val / kilo; - strcpy(suffix, "T"); - } else if (val >= giga) { - val /= mega; - dval = val / kilo; - strcpy(suffix, "G"); - } else if (val >= mega) { - val /= kilo; - dval = val / kilo; - strcpy(suffix, "M"); - } else if (val >= kilo) { - dval = val / kilo; - strcpy(suffix, "K"); - } else { - dval = val; - decimal = false; - } - - if (!sigdigits) { - if (decimal) - snprintf(buf, bufsiz, "%.3g%s", dval, suffix); - else - snprintf(buf, bufsiz, "%d%s", (unsigned int)dval, suffix); - } else { - /* Always show sigdigits + 1, padded on right with zeroes - * followed by suffix */ - int ndigits = sigdigits - 1 - (dval > 0.0 ? floor(log10(dval)) : 0); - - snprintf(buf, bufsiz, "%*.*f%s", sigdigits + 1, ndigits, dval, suffix); - } -} - -/* truediffone == 0x00000000FFFF0000000000000000000000000000000000000000000000000000 - * Generate a 256 bit binary LE target by cutting up diff into 64 bit sized - * portions or vice versa. 
*/ -static const double truediffone = 26959535291011309493156476344723991336010898738574164086137773096960.0; -static const double bits192 = 6277101735386680763835789423207666416102355444464034512896.0; -static const double bits128 = 340282366920938463463374607431768211456.0; -static const double bits64 = 18446744073709551616.0; - -/* Converts a little endian 256 bit value to a double */ -double le256todouble(const uchar *target) -{ - uint64_t *data64; - double dcut64; - - data64 = (uint64_t *)(target + 24); - dcut64 = le64toh(*data64) * bits192; - - data64 = (uint64_t *)(target + 16); - dcut64 += le64toh(*data64) * bits128; - - data64 = (uint64_t *)(target + 8); - dcut64 += le64toh(*data64) * bits64; - - data64 = (uint64_t *)(target); - dcut64 += le64toh(*data64); - - return dcut64; -} - -/* Converts a big endian 256 bit value to a double */ -double be256todouble(const uchar *target) -{ - uint64_t *data64; - double dcut64; - - data64 = (uint64_t *)(target); - dcut64 = be64toh(*data64) * bits192; - - data64 = (uint64_t *)(target + 8); - dcut64 += be64toh(*data64) * bits128; - - data64 = (uint64_t *)(target + 16); - dcut64 += be64toh(*data64) * bits64; - - data64 = (uint64_t *)(target + 24); - dcut64 += be64toh(*data64); - - return dcut64; -} - -/* Return a difficulty from a binary target */ -double diff_from_target(uchar *target) -{ - double dcut64; - - dcut64 = le256todouble(target); - if (unlikely(dcut64 <= 0)) - dcut64 = 1; - return truediffone / dcut64; -} - -/* Return a difficulty from a binary big endian target */ -double diff_from_betarget(uchar *target) -{ - double dcut64; - - dcut64 = be256todouble(target); - if (unlikely(dcut64 <= 0)) - dcut64 = 1; - return truediffone / dcut64; -} - -/* Return the network difficulty from the block header which is in packed form, - * as a double. 
*/ -double diff_from_nbits(char *nbits) -{ - uint8_t shift = nbits[0]; - uchar target[32] = {}; - char *nb; - - nb = bin2hex(nbits, 4); - LOGDEBUG("Nbits is %s", nb); - free(nb); - if (unlikely(shift < 3)) { - LOGWARNING("Corrupt shift of %d in nbits", shift); - shift = 3; - } else if (unlikely(shift > 32)) { - LOGWARNING("Corrupt shift of %d in nbits", shift); - shift = 32; - } - memcpy(target + (32 - shift), nbits + 1, 3); - return diff_from_betarget(target); -} - -void target_from_diff(uchar *target, double diff) -{ - uint64_t *data64, h64; - double d64, dcut64; - - if (unlikely(diff == 0.0)) { - /* This shouldn't happen but best we check to prevent a crash */ - memset(target, 0xff, 32); - return; - } - - d64 = truediffone; - d64 /= diff; - - dcut64 = d64 / bits192; - h64 = dcut64; - data64 = (uint64_t *)(target + 24); - *data64 = htole64(h64); - dcut64 = h64; - dcut64 *= bits192; - d64 -= dcut64; - - dcut64 = d64 / bits128; - h64 = dcut64; - data64 = (uint64_t *)(target + 16); - *data64 = htole64(h64); - dcut64 = h64; - dcut64 *= bits128; - d64 -= dcut64; - - dcut64 = d64 / bits64; - h64 = dcut64; - data64 = (uint64_t *)(target + 8); - *data64 = htole64(h64); - dcut64 = h64; - dcut64 *= bits64; - d64 -= dcut64; - - h64 = d64; - data64 = (uint64_t *)(target); - *data64 = htole64(h64); -} - -void gen_hash(uchar *data, uchar *hash, int len) -{ - uchar hash1[32]; - - sha256(data, len, hash1); - sha256(hash1, 32, hash); -} diff --git a/solo-ckpool-source/src/libckpool.h b/solo-ckpool-source/src/libckpool.h deleted file mode 100644 index 78a83fe..0000000 --- a/solo-ckpool-source/src/libckpool.h +++ /dev/null @@ -1,616 +0,0 @@ -/* - * Copyright 2014-2018,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. 
- */ - -/* This file should contain all exported functions of libckpool */ - -#ifndef LIBCKPOOL_H -#define LIBCKPOOL_H - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if HAVE_BYTESWAP_H -# include -#endif - -#if HAVE_ENDIAN_H -# include -#elif HAVE_SYS_ENDIAN_H -# include -#endif - -#include -#include - -#include "utlist.h" - -#ifndef bswap_16 - #define bswap_16 __builtin_bswap16 - #define bswap_32 __builtin_bswap32 - #define bswap_64 __builtin_bswap64 -#endif - -/* This assumes htobe32 is a macro in endian.h, and if it doesn't exist, then - * htobe64 also won't exist */ -#ifndef htobe32 -# if __BYTE_ORDER == __LITTLE_ENDIAN -# define htole16(x) (x) -# define le16toh(x) (x) -# define htole32(x) (x) -# define htole64(x) (x) -# define le32toh(x) (x) -# define le64toh(x) (x) -# define be32toh(x) bswap_32(x) -# define be64toh(x) bswap_64(x) -# define htobe16(x) bswap_16(x) -# define htobe32(x) bswap_32(x) -# define htobe64(x) bswap_64(x) -# elif __BYTE_ORDER == __BIG_ENDIAN -# define htole16(x) bswap_16(x) -# define le16toh(x) bswap_16(x) -# define htole32(x) bswap_32(x) -# define le32toh(x) bswap_32(x) -# define le64toh(x) bswap_64(x) -# define htole64(x) bswap_64(x) -# define be32toh(x) (x) -# define be64toh(x) (x) -# define htobe16(x) (x) -# define htobe32(x) (x) -# define htobe64(x) (x) -# endif -#endif - -#define unlikely(expr) (__builtin_expect(!!(expr), 0)) -#define likely(expr) (__builtin_expect(!!(expr), 1)) -#define __maybe_unused __attribute__((unused)) -#define uninitialised_var(x) x = x - -#ifndef MAX -#define MAX(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a > _b ? _a : _b; }) -#endif -#ifndef MIN -#define MIN(a,b) \ - ({ __typeof__ (a) _a = (a); \ - __typeof__ (b) _b = (b); \ - _a < _b ? 
_a : _b; }) -#endif - -typedef unsigned char uchar; - -typedef struct timeval tv_t; -typedef struct timespec ts_t; - -static inline void swap_256(void *dest_p, const void *src_p) -{ - uint32_t *dest = dest_p; - const uint32_t *src = src_p; - - dest[0] = src[7]; - dest[1] = src[6]; - dest[2] = src[5]; - dest[3] = src[4]; - dest[4] = src[3]; - dest[5] = src[2]; - dest[6] = src[1]; - dest[7] = src[0]; -} - -static inline void bswap_256(void *dest_p, const void *src_p) -{ - uint32_t *dest = dest_p; - const uint32_t *src = src_p; - - dest[0] = bswap_32(src[7]); - dest[1] = bswap_32(src[6]); - dest[2] = bswap_32(src[5]); - dest[3] = bswap_32(src[4]); - dest[4] = bswap_32(src[3]); - dest[5] = bswap_32(src[2]); - dest[6] = bswap_32(src[1]); - dest[7] = bswap_32(src[0]); -} - -static inline void flip_32(void *dest_p, const void *src_p) -{ - uint32_t *dest = dest_p; - const uint32_t *src = src_p; - int i; - - for (i = 0; i < 8; i++) - dest[i] = bswap_32(src[i]); -} - -static inline void flip_80(void *dest_p, const void *src_p) -{ - uint32_t *dest = dest_p; - const uint32_t *src = src_p; - int i; - - for (i = 0; i < 20; i++) - dest[i] = bswap_32(src[i]); -} - -#define cond_wait(_cond, _lock) _cond_wait(_cond, _lock, __FILE__, __func__, __LINE__) -#define cond_timedwait(_cond, _lock, _abstime) _cond_timedwait(_cond, _lock, _abstime, __FILE__, __func__, __LINE__) -#define mutex_timedlock(_lock, _timeout) _mutex_timedlock(_lock, _timeout, __FILE__, __func__, __LINE__) -#define mutex_lock(_lock) _mutex_lock(_lock, __FILE__, __func__, __LINE__) -#define mutex_unlock_noyield(_lock) _mutex_unlock_noyield(_lock, __FILE__, __func__, __LINE__) -#define mutex_unlock(_lock) _mutex_unlock(_lock, __FILE__, __func__, __LINE__) -#define mutex_trylock(_lock) _mutex_trylock(_lock, __FILE__, __func__, __LINE__) -#define wr_lock(_lock) _wr_lock(_lock, __FILE__, __func__, __LINE__) -#define wr_trylock(_lock) _wr_trylock(_lock, __FILE__, __func__, __LINE__) -#define rd_lock(_lock) _rd_lock(_lock, 
__FILE__, __func__, __LINE__) -#define rw_unlock(_lock) _rw_unlock(_lock, __FILE__, __func__, __LINE__) -#define rd_unlock_noyield(_lock) _rd_unlock_noyield(_lock, __FILE__, __func__, __LINE__) -#define wr_unlock_noyield(_lock) _wr_unlock_noyield(_lock, __FILE__, __func__, __LINE__) -#define rd_unlock(_lock) _rd_unlock(_lock, __FILE__, __func__, __LINE__) -#define wr_unlock(_lock) _wr_unlock(_lock, __FILE__, __func__, __LINE__) -#define mutex_init(_lock) _mutex_init(_lock, __FILE__, __func__, __LINE__) -#define rwlock_init(_lock) _rwlock_init(_lock, __FILE__, __func__, __LINE__) -#define cond_init(_cond) _cond_init(_cond, __FILE__, __func__, __LINE__) - -#define cklock_init(_lock) _cklock_init(_lock, __FILE__, __func__, __LINE__) -#define ck_rlock(_lock) _ck_rlock(_lock, __FILE__, __func__, __LINE__) -#define ck_wlock(_lock) _ck_wlock(_lock, __FILE__, __func__, __LINE__) -#define ck_dwlock(_lock) _ck_dwlock(_lock, __FILE__, __func__, __LINE__) -#define ck_dlock(_lock) _ck_dlock(_lock, __FILE__, __func__, __LINE__) -#define ck_runlock(_lock) _ck_runlock(_lock, __FILE__, __func__, __LINE__) -#define ck_wunlock(_lock) _ck_wunlock(_lock, __FILE__, __func__, __LINE__) - -#define ckalloc(len) _ckalloc(len, __FILE__, __func__, __LINE__) -#define ckzalloc(len) _ckzalloc(len, __FILE__, __func__, __LINE__) - -#define dealloc(ptr) do { \ - free(ptr); \ - ptr = NULL; \ -} while (0) - -#define VASPRINTF(strp, fmt, ...) do { \ - if (unlikely(vasprintf(strp, fmt, ##__VA_ARGS__) < 0)) \ - quitfrom(1, __FILE__, __func__, __LINE__, "Failed to vasprintf"); \ -} while (0) - -#define ASPRINTF(strp, fmt, ...) do { \ - if (unlikely(asprintf(strp, fmt, ##__VA_ARGS__) < 0)) \ - quitfrom(1, __FILE__, __func__, __LINE__, "Failed to asprintf"); \ -} while (0) - -void logmsg(int loglevel, const char *fmt, ...); - -#define DEFLOGBUFSIZ 512 - -#define LOGMSGSIZ(__siz, __lvl, __fmt, ...) 
do { \ - char *BUF; \ - int LEN, OFFSET = 0; \ - ASPRINTF(&BUF, __fmt, ##__VA_ARGS__); \ - LEN = strlen(BUF); \ - while (LEN > 0) { \ - char tmp42[__siz] = {}; \ - int CPY = MIN(LEN, DEFLOGBUFSIZ - 2); \ - memcpy(tmp42, BUF + OFFSET, CPY); \ - logmsg(__lvl, "%s", tmp42);\ - OFFSET += CPY; \ - LEN -= OFFSET; \ - } \ - free(BUF); \ -} while(0) - -#define LOGMSG(_lvl, _fmt, ...) \ - LOGMSGSIZ(DEFLOGBUFSIZ, _lvl, _fmt, ##__VA_ARGS__) - -#define LOGEMERG(fmt, ...) LOGMSG(LOG_EMERG, fmt, ##__VA_ARGS__) -#define LOGALERT(fmt, ...) LOGMSG(LOG_ALERT, fmt, ##__VA_ARGS__) -#define LOGCRIT(fmt, ...) LOGMSG(LOG_CRIT, fmt, ##__VA_ARGS__) -#define LOGERR(fmt, ...) LOGMSG(LOG_ERR, fmt, ##__VA_ARGS__) -#define LOGWARNING(fmt, ...) LOGMSG(LOG_WARNING, fmt, ##__VA_ARGS__) -#define LOGNOTICE(fmt, ...) LOGMSG(LOG_NOTICE, fmt, ##__VA_ARGS__) -#define LOGINFO(fmt, ...) LOGMSG(LOG_INFO, fmt, ##__VA_ARGS__) -#define LOGDEBUG(fmt, ...) LOGMSG(LOG_DEBUG, fmt, ##__VA_ARGS__) - -#define IN_FMT_FFL " in %s %s():%d" -#define quitfrom(status, _file, _func, _line, fmt, ...) do { \ - if (fmt) { \ - fprintf(stderr, fmt IN_FMT_FFL, ##__VA_ARGS__, _file, _func, _line); \ - fprintf(stderr, "\n"); \ - fflush(stderr); \ - } \ - exit(status); \ -} while (0) - -#define quit(status, fmt, ...) do { \ - if (fmt) { \ - fprintf(stderr, fmt, ##__VA_ARGS__); \ - fprintf(stderr, "\n"); \ - fflush(stderr); \ - } \ - exit(status); \ -} while (0) - -#define PAGESIZE (4096) - -/* Default timeouts for unix socket reads and writes in seconds. Set write - * timeout to double the read timeout in case of one read blocking the next - * writer. 
*/ -#define UNIX_READ_TIMEOUT 5 -#define UNIX_WRITE_TIMEOUT 10 - -#define MIN1 60 -#define MIN5 300 -#define MIN15 900 -#define HOUR 3600 -#define HOUR6 21600 -#define DAY 86400 -#define WEEK 604800 - -/* Share error values */ - -enum share_err { - SE_INVALID_NONCE2 = -9, - SE_WORKER_MISMATCH, - SE_NO_NONCE, - SE_NO_NTIME, - SE_NO_NONCE2, - SE_NO_JOBID, - SE_NO_USERNAME, - SE_INVALID_SIZE, - SE_NOT_ARRAY, - SE_NONE, // 0 - SE_INVALID_JOBID, - SE_STALE, - SE_NTIME_INVALID, - SE_DUPE, - SE_HIGH_DIFF, - SE_INVALID_VERSION_MASK -}; - -static const char __maybe_unused *share_errs[] = { - "Invalid nonce2 length", - "Worker mismatch", - "No nonce", - "No ntime", - "No nonce2", - "No job_id", - "No username", - "Invalid array size", - "Params not array", - "Valid", - "Invalid JobID", - "Stale", - "Ntime out of range", - "Duplicate", - "Above target", - "Invalid version mask" -}; - -#define SHARE_ERR(x) share_errs[((x) + 9)] - -typedef struct ckmutex mutex_t; - -struct ckmutex { - pthread_mutex_t mutex; - const char *file; - const char *func; - int line; -}; - -typedef struct ckrwlock rwlock_t; - -struct ckrwlock { - pthread_rwlock_t rwlock; - const char *file; - const char *func; - int line; -}; - -/* ck locks, a write biased variant of rwlocks */ -struct cklock { - mutex_t mutex; - rwlock_t rwlock; - const char *file; - const char *func; - int line; -}; - -typedef struct cklock cklock_t; - -struct unixsock { - int sockd; - char *path; -}; - -typedef struct unixsock unixsock_t; - -void _json_check(json_t *val, json_error_t *err, const char *file, const char *func, const int line); -#define json_check(VAL, ERR) _json_check(VAL, ERR, __FILE__, __func__, __LINE__) - -/* Check and pack json */ -#define JSON_CPACK(VAL, ...) do { \ - json_error_t ERR; \ - VAL = json_pack_ex(&ERR, 0, ##__VA_ARGS__); \ - json_check(VAL, &ERR); \ -} while (0) - -/* No error checking with these, make sure we know they're valid already! 
*/ -static inline void json_strcpy(char *buf, json_t *val, const char *key) -{ - strcpy(buf, json_string_value(json_object_get(val, key)) ? : ""); -} - -static inline void json_dblcpy(double *dbl, json_t *val, const char *key) -{ - *dbl = json_real_value(json_object_get(val, key)); -} - -static inline void json_uintcpy(uint32_t *u32, json_t *val, const char *key) -{ - *u32 = (uint32_t)json_integer_value(json_object_get(val, key)); -} - -static inline void json_uint64cpy(uint64_t *u64, json_t *val, const char *key) -{ - *u64 = (uint64_t)json_integer_value(json_object_get(val, key)); -} - -static inline void json_int64cpy(int64_t *i64, json_t *val, const char *key) -{ - *i64 = (int64_t)json_integer_value(json_object_get(val, key)); -} - -static inline void json_intcpy(int *i, json_t *val, const char *key) -{ - *i = json_integer_value(json_object_get(val, key)); -} - -static inline void json_strdup(char **buf, json_t *val, const char *key) -{ - *buf = strdup(json_string_value(json_object_get(val, key)) ? : ""); -} - -/* Helpers for setting a field will check for valid entry and print an error - * if it is unsuccessfully set. 
*/ -static inline void _json_set_string(json_t *val, const char *key, const char *str, - const char *file, const char *func, const int line) -{ - if (unlikely(json_object_set_new(val, key, json_string(str)))) - LOGERR("Failed to set json string from %s %s:%d", file, func, line); -} -#define json_set_string(val, key, str) _json_set_string(val, key, str, __FILE__, __func__, __LINE__) - -/* Int is long long so will work for u32 and int64 */ -static inline void _json_set_int(json_t *val, const char *key, int64_t integer, - const char *file, const char *func, const int line) -{ - if (unlikely(json_object_set_new_nocheck(val, key, json_integer(integer)))) - LOGERR("Failed to set json int from %s %s:%d", file, func, line); -} -#define json_set_int(val, key, integer) _json_set_int(val, key, integer, __FILE__, __func__, __LINE__) -#define json_set_uint32(val, key, u32) _json_set_int(val, key, u32, __FILE__, __func__, __LINE__) -#define json_set_int64(val, key, i64) _json_set_int(val, key, i64, __FILE__, __func__, __LINE__) - -static inline void _json_set_double(json_t *val, const char *key, double real, - const char *file, const char *func, const int line) -{ - if (unlikely(json_object_set_new_nocheck(val, key, json_real(real)))) - LOGERR("Failed to set json double from %s %s:%d", file, func, line); -} -#define json_set_double(val, key, real) _json_set_double(val, key, real, __FILE__, __func__, __LINE__) - -static inline void _json_set_bool(json_t *val, const char *key, bool boolean, - const char *file, const char *func, const int line) -{ - if (unlikely(json_object_set_new_nocheck(val, key, json_boolean(boolean)))) - LOGERR("Failed to set json bool from %s %s:%d", file, func, line); -} -#define json_set_bool(val, key, boolean) _json_set_bool(val, key, boolean, __FILE__, __func__, __LINE__) - - -static inline void _json_set_object(json_t *val, const char *key, json_t *object, - const char *file, const char *func, const int line) -{ - if 
(unlikely(json_object_set_new_nocheck(val, key, object))) - LOGERR("Failed to set json object from %s %s:%d", file, func, line); -} -#define json_set_object(val, key, object) _json_set_object(val, key, object, __FILE__, __func__, __LINE__) - -void rename_proc(const char *name); -void create_pthread(pthread_t *thread, void *(*start_routine)(void *), void *arg); -void join_pthread(pthread_t thread); -bool ck_completion_timeout(void *fn, void *fnarg, int timeout); - -int _cond_wait(pthread_cond_t *cond, mutex_t *lock, const char *file, const char *func, const int line); -int _cond_timedwait(pthread_cond_t *cond, mutex_t *lock, const struct timespec *abstime, const char *file, const char *func, const int line); -int _mutex_timedlock(mutex_t *lock, int timeout, const char *file, const char *func, const int line); -void _mutex_lock(mutex_t *lock, const char *file, const char *func, const int line); -void _mutex_unlock_noyield(mutex_t *lock, const char *file, const char *func, const int line); -void _mutex_unlock(mutex_t *lock, const char *file, const char *func, const int line); -int _mutex_trylock(mutex_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line); -void mutex_destroy(mutex_t *lock); - -void _wr_lock(rwlock_t *lock, const char *file, const char *func, const int line); -int _wr_trylock(rwlock_t *lock, __maybe_unused const char *file, __maybe_unused const char *func, __maybe_unused const int line); -void _rd_lock(rwlock_t *lock, const char *file, const char *func, const int line); -void _rw_unlock(rwlock_t *lock, const char *file, const char *func, const int line); -void _rd_unlock_noyield(rwlock_t *lock, const char *file, const char *func, const int line); -void _wr_unlock_noyield(rwlock_t *lock, const char *file, const char *func, const int line); -void _rd_unlock(rwlock_t *lock, const char *file, const char *func, const int line); -void _wr_unlock(rwlock_t *lock, const char *file, const char *func, const int 
line); -void _mutex_init(mutex_t *lock, const char *file, const char *func, const int line); -void _rwlock_init(rwlock_t *lock, const char *file, const char *func, const int line); -void _cond_init(pthread_cond_t *cond, const char *file, const char *func, const int line); - -void _cklock_init(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_rlock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_ilock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_uilock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_ulock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_wlock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_dwlock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_dwilock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_dlock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_runlock(cklock_t *lock, const char *file, const char *func, const int line); -void _ck_wunlock(cklock_t *lock, const char *file, const char *func, const int line); -void cklock_destroy(cklock_t *lock); - -void _cksem_init(sem_t *sem, const char *file, const char *func, const int line); -void _cksem_post(sem_t *sem, const char *file, const char *func, const int line); -void _cksem_wait(sem_t *sem, const char *file, const char *func, const int line); -int _cksem_trywait(sem_t *sem, const char *file, const char *func, const int line); -int _cksem_mswait(sem_t *sem, int ms, const char *file, const char *func, const int line); -void _cksem_destroy(sem_t *sem, const char *file, const char *func, const int line); - -#define cksem_init(SEM) _cksem_init(SEM, __FILE__, __func__, __LINE__) -#define cksem_post(SEM) _cksem_post(SEM, __FILE__, __func__, __LINE__) -#define cksem_wait(SEM) _cksem_wait(SEM, __FILE__, __func__, __LINE__) -#define 
cksem_trywait(SEM) _cksem_trywait(SEM, __FILE__, __func__, __LINE__) -#define cksem_mswait(SEM, _timeout) _cksem_mswait(SEM, _timeout, __FILE__, __func__, __LINE__) -#define cksem_destroy(SEM) _cksem_destroy(SEM, __FILE__, __func__, __LINE__) - -static inline bool sock_connecting(void) -{ - return errno == EINPROGRESS; -} - -static inline bool sock_blocks(void) -{ - return (errno == EAGAIN || errno == EWOULDBLOCK); -} - -static inline bool sock_timeout(void) -{ - return (errno == ETIMEDOUT); -} - -bool extract_sockaddr(char *url, char **sockaddr_url, char **sockaddr_port); -bool url_from_sockaddr(const struct sockaddr *addr, char *url, char *port); -bool addrinfo_from_url(const char *url, const char *port, struct addrinfo *addrinfo); -bool url_from_serverurl(char *serverurl, char *newurl, char *newport); -bool url_from_socket(const int sockd, char *url, char *port); - -void keep_sockalive(int fd); -void nolinger_socket(int fd); -void noblock_socket(int fd); -void block_socket(int fd); -void _close(int *fd, const char *file, const char *func, const int line); -#define _Close(FD) _close(FD, __FILE__, __func__, __LINE__) -#define Close(FD) _close(&FD, __FILE__, __func__, __LINE__) -int bind_socket(char *url, char *port); -int connect_socket(char *url, char *port); -int round_trip(char *url); -int write_socket(int fd, const void *buf, size_t nbyte); -void empty_socket(int fd); -void _close_unix_socket(int *sockd, const char *server_path); -#define close_unix_socket(sockd, server_path) _close_unix_socket(&sockd, server_path) -int _open_unix_server(const char *server_path, const char *file, const char *func, const int line); -#define open_unix_server(server_path) _open_unix_server(server_path, __FILE__, __func__, __LINE__) -int _open_unix_client(const char *server_path, const char *file, const char *func, const int line); -#define open_unix_client(server_path) _open_unix_client(server_path, __FILE__, __func__, __LINE__) -int wait_close(int sockd, int timeout); -int 
wait_read_select(int sockd, float timeout); -int read_length(int sockd, void *buf, int len); -char *_recv_unix_msg(int sockd, int timeout1, int timeout2, const char *file, const char *func, const int line); -#define RECV_UNIX_TIMEOUT1 30 -#define RECV_UNIX_TIMEOUT2 5 -#define recv_unix_msg(sockd) _recv_unix_msg(sockd, UNIX_READ_TIMEOUT, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__) -#define recv_unix_msg_tmo(sockd, tmo) _recv_unix_msg(sockd, tmo, UNIX_READ_TIMEOUT, __FILE__, __func__, __LINE__) -#define recv_unix_msg_tmo2(sockd, tmo1, tmo2) _recv_unix_msg(sockd, tmo1, tmo2, __FILE__, __func__, __LINE__) -int wait_write_select(int sockd, float timeout); -#define write_length(sockd, buf, len) _write_length(sockd, buf, len, __FILE__, __func__, __LINE__) -int _write_length(int sockd, const void *buf, int len, const char *file, const char *func, const int line); -bool _send_unix_msg(int sockd, const char *buf, int timeout, const char *file, const char *func, const int line); -#define send_unix_msg(sockd, buf) _send_unix_msg(sockd, buf, UNIX_WRITE_TIMEOUT, __FILE__, __func__, __LINE__) -bool _send_unix_data(int sockd, const struct msghdr *msg, const char *file, const char *func, const int line); -#define send_unix_data(sockd, msg) _send_unix_data(sockd, msg, __FILE__, __func__, __LINE__) -bool _recv_unix_data(int sockd, struct msghdr *msg, const char *file, const char *func, const int line); -#define recv_unix_data(sockd, msg) _recv_unix_data(sockd, msg, __FILE__, __func__, __LINE__) -bool _send_fd(int fd, int sockd, const char *file, const char *func, const int line); -#define send_fd(fd, sockd) _send_fd(fd, sockd, __FILE__, __func__, __LINE__) -int _get_fd(int sockd, const char *file, const char *func, const int line); -#define get_fd(sockd) _get_fd(sockd, __FILE__, __func__, __LINE__) - -const char *__json_array_string(json_t *val, unsigned int entry); -char *json_array_string(json_t *val, unsigned int entry); -json_t *json_object_dup(json_t *val, const char 
*entry); - -char *rotating_filename(const char *path, time_t when); -bool rotating_log(const char *path, const char *msg); - -void align_len(size_t *len); -void realloc_strcat(char **ptr, const char *s); -void trail_slash(char **buf); -void *_ckalloc(size_t len, const char *file, const char *func, const int line); -void *json_ckalloc(size_t size); -void *_ckzalloc(size_t len, const char *file, const char *func, const int line); -size_t round_up_page(size_t len); - -extern const int hex2bin_tbl[]; -void __bin2hex(void *vs, const void *vp, size_t len); -void *bin2hex(const void *vp, size_t len); -bool _validhex(const char *buf, const char *file, const char *func, const int line); -#define validhex(buf) _validhex(buf, __FILE__, __func__, __LINE__) -bool _hex2bin(void *p, const void *vhexstr, size_t len, const char *file, const char *func, const int line); -#define hex2bin(p, vhexstr, len) _hex2bin(p, vhexstr, len, __FILE__, __func__, __LINE__) -char *http_base64(const char *src); -void b58tobin(char *b58bin, const char *b58); -int safecmp(const char *a, const char *b); -bool cmdmatch(const char *buf, const char *cmd); - -int address_to_txn(char *p2h, const char *addr, const bool script, const bool segwit); -int ser_number(uchar *s, int32_t val); -int get_sernumber(uchar *s); -bool fulltest(const uchar *hash, const uchar *target); - -void copy_tv(tv_t *dest, const tv_t *src); -void ts_to_tv(tv_t *val, const ts_t *spec); -void tv_to_ts(ts_t *spec, const tv_t *val); -void us_to_tv(tv_t *val, int64_t us); -void us_to_ts(ts_t *spec, int64_t us); -void ms_to_ts(ts_t *spec, int64_t ms); -void ms_to_tv(tv_t *val, int64_t ms); -void tv_time(tv_t *tv); -void ts_realtime(ts_t *ts); - -void cksleep_prepare_r(ts_t *ts); -void nanosleep_abstime(ts_t *ts_end); -void timeraddspec(ts_t *a, const ts_t *b); -void cksleep_ms_r(ts_t *ts_start, int ms); -void cksleep_us_r(ts_t *ts_start, int64_t us); -void cksleep_ms(int ms); -void cksleep_us(int64_t us); - -double us_tvdiff(tv_t *end, 
tv_t *start); -int ms_tvdiff(tv_t *end, tv_t *start); -double tvdiff(tv_t *end, tv_t *start); - -void decay_time(double *f, double fadd, double fsecs, double interval); -double sane_tdiff(tv_t *end, tv_t *start); -void suffix_string(double val, char *buf, size_t bufsiz, int sigdigits); - -double le256todouble(const uchar *target); -double be256todouble(const uchar *target); -double diff_from_target(uchar *target); -double diff_from_betarget(uchar *target); -double diff_from_nbits(char *nbits); -void target_from_diff(uchar *target, double diff); - -void gen_hash(uchar *data, uchar *hash, int len); - -#endif /* LIBCKPOOL_H */ diff --git a/solo-ckpool-source/src/notifier.c b/solo-ckpool-source/src/notifier.c deleted file mode 100644 index 8157a4b..0000000 --- a/solo-ckpool-source/src/notifier.c +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2014-2016 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. 
- */ - -#include "config.h" - -#include -#include -#include - -#include "libckpool.h" - -int main(int argc, char **argv) -{ - char *name = NULL, *socket_dir = NULL; - bool proxy = false; - int c, sockd; - - while ((c = getopt(argc, argv, "n:s:p")) != -1) { - switch(c) { - case 'n': - name = strdup(optarg); - break; - case 's': - socket_dir = strdup(optarg); - break; - case 'p': - proxy = true; - break; - } - } - if (!socket_dir) - socket_dir = strdup("/tmp"); - trail_slash(&socket_dir); - if (!name) { - if (proxy) - name = strdup("ckproxy"); - else - name = strdup("ckpool"); - } - realloc_strcat(&socket_dir, name); - dealloc(name); - trail_slash(&socket_dir); - realloc_strcat(&socket_dir, "stratifier"); - sockd = open_unix_client(socket_dir); - if (sockd < 0) { - LOGERR("Failed to open socket: %s", socket_dir); - exit(1); - } - if (!send_unix_msg(sockd, "update")) { - LOGERR("Failed to send stratifier update msg"); - exit(1); - } - LOGNOTICE("Notified stratifier of block update"); - exit(0); -} - - diff --git a/solo-ckpool-source/src/sha2.c b/solo-ckpool-source/src/sha2.c deleted file mode 100644 index f82356b..0000000 --- a/solo-ckpool-source/src/sha2.c +++ /dev/null @@ -1,236 +0,0 @@ -/* - * FIPS 180-2 SHA-224/256/384/512 implementation - * Last update: 02/02/2007 - * Issue date: 04/30/2005 - * - * Copyright (C) 2013-2016, Con Kolivas - * Copyright (C) 2005, 2007 Olivier Gay - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. 
Neither the name of the project nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include "config.h" - -#include -#include - -#include "sha2.h" - -#define UNPACK32(x, str) \ -{ \ - *((str) + 3) = (uint8_t) ((x) ); \ - *((str) + 2) = (uint8_t) ((x) >> 8); \ - *((str) + 1) = (uint8_t) ((x) >> 16); \ - *((str) + 0) = (uint8_t) ((x) >> 24); \ -} - -#define PACK32(str, x) \ -{ \ - *(x) = ((uint32_t) *((str) + 3) ) \ - | ((uint32_t) *((str) + 2) << 8) \ - | ((uint32_t) *((str) + 1) << 16) \ - | ((uint32_t) *((str) + 0) << 24); \ -} - -#define SHA256_SCR(i) \ -{ \ - w[i] = SHA256_F4(w[i - 2]) + w[i - 7] \ - + SHA256_F3(w[i - 15]) + w[i - 16]; \ -} - -uint32_t sha256_h0[8] = - {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, - 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; - -uint32_t sha256_k[64] = - {0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 
0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2}; - -/* SHA-256 functions */ - -#ifdef USE_AVX2 -extern void sha256_rorx(const void *, uint32_t[8], uint64_t); - -void sha256_transf(sha256_ctx *ctx, const unsigned char *message, - unsigned int block_nb) -{ - sha256_rorx(message, ctx->h, block_nb); -} -#elif defined(USE_AVX1) -extern void sha256_avx(const unsigned char *, uint32_t[8], uint64_t); - -void sha256_transf(sha256_ctx *ctx, const unsigned char *message, - unsigned int block_nb) -{ - sha256_avx(message, ctx->h, block_nb); -} -#elif defined(USE_SSE4) -extern void sha256_sse4(const unsigned char *, uint32_t[8], uint64_t); - -void sha256_transf(sha256_ctx *ctx, const unsigned char *message, - unsigned int block_nb) -{ - sha256_sse4(message, ctx->h, block_nb); -} -#else -void sha256_transf(sha256_ctx *ctx, const unsigned char *message, - unsigned int block_nb) -{ - uint32_t w[64]; - uint32_t wv[8]; - uint32_t t1, t2; - const unsigned char *sub_block; - int i; - - int j; - - for (i = 0; i < (int) block_nb; i++) { - sub_block = message + (i << 6); - - for (j = 0; j < 16; j++) { - PACK32(&sub_block[j << 2], &w[j]); - } - - for (j = 16; j < 64; j++) { - SHA256_SCR(j); - } - - for (j = 0; j < 8; j++) { - wv[j] = ctx->h[j]; - } - - for (j = 0; j < 64; j++) { - t1 = wv[7] + SHA256_F2(wv[4]) + CH(wv[4], wv[5], wv[6]) - + sha256_k[j] + w[j]; - t2 = SHA256_F1(wv[0]) + MAJ(wv[0], wv[1], wv[2]); - wv[7] = wv[6]; - wv[6] = wv[5]; - wv[5] = wv[4]; - wv[4] = wv[3] + t1; - wv[3] = wv[2]; - wv[2] = wv[1]; - wv[1] = wv[0]; - 
wv[0] = t1 + t2; - } - - for (j = 0; j < 8; j++) { - ctx->h[j] += wv[j]; - } - } -} -#endif -void sha256(const unsigned char *message, unsigned int len, unsigned char *digest) -{ - sha256_ctx ctx; - - sha256_init(&ctx); - sha256_update(&ctx, message, len); - sha256_final(&ctx, digest); -} - -void sha256_init(sha256_ctx *ctx) -{ - int i; - for (i = 0; i < 8; i++) { - ctx->h[i] = sha256_h0[i]; - } - - ctx->len = 0; - ctx->tot_len = 0; -} - -void sha256_update(sha256_ctx *ctx, const unsigned char *message, - unsigned int len) -{ - unsigned int block_nb; - unsigned int new_len, rem_len, tmp_len; - const unsigned char *shifted_message; - - tmp_len = SHA256_BLOCK_SIZE - ctx->len; - rem_len = len < tmp_len ? len : tmp_len; - - memcpy(&ctx->block[ctx->len], message, rem_len); - - if (ctx->len + len < SHA256_BLOCK_SIZE) { - ctx->len += len; - return; - } - - new_len = len - rem_len; - block_nb = new_len / SHA256_BLOCK_SIZE; - - shifted_message = message + rem_len; - - sha256_transf(ctx, ctx->block, 1); - sha256_transf(ctx, shifted_message, block_nb); - - rem_len = new_len % SHA256_BLOCK_SIZE; - - memcpy(ctx->block, &shifted_message[block_nb << 6], - rem_len); - - ctx->len = rem_len; - ctx->tot_len += (block_nb + 1) << 6; -} - -void sha256_final(sha256_ctx *ctx, unsigned char *digest) -{ - unsigned int block_nb; - unsigned int pm_len; - unsigned int len_b; - - int i; - - block_nb = (1 + ((SHA256_BLOCK_SIZE - 9) - < (ctx->len % SHA256_BLOCK_SIZE))); - - len_b = (ctx->tot_len + ctx->len) << 3; - pm_len = block_nb << 6; - - memset(ctx->block + ctx->len, 0, pm_len - ctx->len); - ctx->block[ctx->len] = 0x80; - UNPACK32(len_b, ctx->block + pm_len - 4); - - sha256_transf(ctx, ctx->block, block_nb); - - for (i = 0 ; i < 8; i++) { - UNPACK32(ctx->h[i], &digest[i << 2]); - } -} diff --git a/solo-ckpool-source/src/sha2.h b/solo-ckpool-source/src/sha2.h deleted file mode 100644 index d470112..0000000 --- a/solo-ckpool-source/src/sha2.h +++ /dev/null @@ -1,69 +0,0 @@ -/* - * FIPS 180-2 
SHA-224/256/384/512 implementation - * Last update: 02/02/2007 - * Issue date: 04/30/2005 - * - * Copyright (C) 2013-2014, Con Kolivas - * Copyright (C) 2005, 2007 Olivier Gay - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the project nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- */ - -#include "config.h" - -#ifndef SHA2_H -#define SHA2_H - -#define SHA256_DIGEST_SIZE ( 256 / 8) -#define SHA256_BLOCK_SIZE ( 512 / 8) - -#define SHFR(x, n) (x >> n) -#define ROTR(x, n) ((x >> n) | (x << ((sizeof(x) << 3) - n))) -#define CH(x, y, z) ((x & y) ^ (~x & z)) -#define MAJ(x, y, z) ((x & y) ^ (x & z) ^ (y & z)) - -#define SHA256_F1(x) (ROTR(x, 2) ^ ROTR(x, 13) ^ ROTR(x, 22)) -#define SHA256_F2(x) (ROTR(x, 6) ^ ROTR(x, 11) ^ ROTR(x, 25)) -#define SHA256_F3(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHFR(x, 3)) -#define SHA256_F4(x) (ROTR(x, 17) ^ ROTR(x, 19) ^ SHFR(x, 10)) - -typedef struct { - unsigned int tot_len; - unsigned int len; - unsigned char block[2 * SHA256_BLOCK_SIZE]; - uint32_t h[8]; -} sha256_ctx; - -extern uint32_t sha256_k[64]; - -void sha256_init(sha256_ctx * ctx); -void sha256_update(sha256_ctx *ctx, const unsigned char *message, - unsigned int len); -void sha256_final(sha256_ctx *ctx, unsigned char *digest); -void sha256(const unsigned char *message, unsigned int len, - unsigned char *digest); - -#endif /* !SHA2_H */ diff --git a/solo-ckpool-source/src/sha256_code_release/open_software_license.txt b/solo-ckpool-source/src/sha256_code_release/open_software_license.txt deleted file mode 100644 index 44a2002..0000000 --- a/solo-ckpool-source/src/sha256_code_release/open_software_license.txt +++ /dev/null @@ -1,32 +0,0 @@ -Copyright (c) 2012, Intel Corporation - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the - distribution. 
- -* Neither the name of the Intel Corporation nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - -THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION ""AS IS"" AND ANY -EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR -CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm b/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm deleted file mode 100644 index 7dcafaa..0000000 --- a/solo-ckpool-source/src/sha256_code_release/sha256_avx1.asm +++ /dev/null @@ -1,588 +0,0 @@ -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; Copyright (c) 2012, Intel Corporation -; -; All rights reserved. -; -; Redistribution and use in source and binary forms, with or without -; modification, are permitted provided that the following conditions are -; met: -; -; * Redistributions of source code must retain the above copyright -; notice, this list of conditions and the following disclaimer. -; -; * Redistributions in binary form must reproduce the above copyright -; notice, this list of conditions and the following disclaimer in the -; documentation and/or other materials provided with the -; distribution. 
-; -; * Neither the name of the Intel Corporation nor the names of its -; contributors may be used to endorse or promote products derived from -; this software without specific prior written permission. -; -; -; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY -; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR -; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; Example YASM command lines: -; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_avx1.obj -g cv8 sha256_avx1.asm -; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_avx1.o sha256_avx1.asm -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; This code is described in an Intel White-Paper: -; "Fast SHA-256 Implementations on Intel Architecture Processors" -; -; To find it, surf to http://www.intel.com/p/en_US/embedded -; and search for that title. 
-; The paper is expected to be released roughly at the end of April, 2012 -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; This code schedules 1 blocks at a time, with 4 lanes per block -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define VMOVDQ vmovdqu ;; assume buffers not aligned - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros - -; addm [mem], reg -; Add reg to mem using reg-mem add and store -%macro addm 2 - add %2, %1 - mov %1, %2 -%endm - -%macro MY_ROR 2 - shld %1,%1,(32-(%2)) -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask -; Load xmm with mem and byte swap each dword -%macro COPY_XMM_AND_BSWAP 3 - VMOVDQ %1, %2 - vpshufb %1, %1, %3 -%endmacro - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define X0 xmm4 -%define X1 xmm5 -%define X2 xmm6 -%define X3 xmm7 - -%define XTMP0 xmm0 -%define XTMP1 xmm1 -%define XTMP2 xmm2 -%define XTMP3 xmm3 -%define XTMP4 xmm8 -%define XFER xmm9 -%define XTMP5 xmm11 - -%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA -%define SHUF_DC00 xmm12 ; shuffle xDxC -> DC00 -%define BYTE_FLIP_MASK xmm13 - -%ifdef LINUX -%define NUM_BLKS rdx ; 3rd arg -%define CTX rsi ; 2nd arg -%define INP rdi ; 1st arg - -%define SRND rdi ; clobbers INP -%define c ecx -%define d r8d -%define e edx -%else -%define NUM_BLKS r8 ; 3rd arg -%define CTX rdx ; 2nd arg -%define INP rcx ; 1st arg - -%define SRND rcx ; clobbers INP -%define c edi -%define d esi -%define e r8d - -%endif -%define TBL rbp -%define a eax -%define b ebx - -%define f r9d -%define g r10d -%define h r11d - -%define y0 r13d -%define y1 r14d -%define y2 r15d - - -_INP_END_SIZE equ 8 -_INP_SIZE equ 8 -_XFER_SIZE equ 8 -%ifdef LINUX -_XMM_SAVE_SIZE equ 0 -%else -_XMM_SAVE_SIZE equ 8*16 -%endif -; STACK_SIZE plus pushes must be an odd multiple of 8 -_ALIGN_SIZE equ 8 - -_INP_END equ 0 -_INP equ _INP_END + _INP_END_SIZE -_XFER equ _INP + _INP_SIZE -_XMM_SAVE equ _XFER + _XFER_SIZE + _ALIGN_SIZE 
-STACK_SIZE equ _XMM_SAVE + _XMM_SAVE_SIZE - -; rotate_Xs -; Rotate values of symbols X0...X3 -%macro rotate_Xs 0 -%xdefine X_ X0 -%xdefine X0 X1 -%xdefine X1 X2 -%xdefine X2 X3 -%xdefine X3 X_ -%endm - -; ROTATE_ARGS -; Rotate values of symbols a...h -%macro ROTATE_ARGS 0 -%xdefine TMP_ h -%xdefine h g -%xdefine g f -%xdefine f e -%xdefine e d -%xdefine d c -%xdefine c b -%xdefine b a -%xdefine a TMP_ -%endm - -%macro FOUR_ROUNDS_AND_SCHED 0 - ;; compute s0 four at a time and s1 two at a time - ;; compute W[-16] + W[-7] 4 at a time - ;vmovdqa XTMP0, X3 - mov y0, e ; y0 = e - MY_ROR y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - vpalignr XTMP0, X3, X2, 4 ; XTMP0 = W[-7] - MY_ROR y1, (22-13) ; y1 = a >> (22-13) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - ;vmovdqa XTMP1, X1 - xor y1, a ; y1 = a ^ (a >> (22-13) - xor y2, g ; y2 = f^g - vpaddd XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16] - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - ;; compute s0 - vpalignr XTMP1, X1, X0, 4 ; XTMP1 = W[-15] - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - - - MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH - - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - - vpsrld XTMP2, XTMP1, 7 - - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - - vpslld XTMP3, XTMP1, (32-7) - - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - - vpor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7 - - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - - mov y0, e ; y0 = e - mov y1, a ; y1 = a - - - MY_ROR y0, (25-11) ; y0 = e >> 
(25-11) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - MY_ROR y1, (22-13) ; y1 = a >> (22-13) - - vpsrld XTMP2, XTMP1,18 - - xor y1, a ; y1 = a ^ (a >> (22-13) - MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - xor y2, g ; y2 = f^g - - vpsrld XTMP4, XTMP1, 3 ; XTMP4 = W[-15] >> 3 - - MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - - vpslld XTMP1, XTMP1, (32-18) - - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - xor y2, g ; y2 = CH = ((f^g)&e)^g - - vpxor XTMP3, XTMP3, XTMP1 - - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH - MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - - vpxor XTMP3, XTMP3, XTMP2 ; XTMP1 = W[-15] MY_ROR 7 ^ W[-15] MY_ROR 18 - - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - - vpxor XTMP1, XTMP3, XTMP4 ; XTMP1 = s0 - - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - ;; compute low s1 - vpshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - vpaddd XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA} - - mov y0, e ; y0 = e - mov y1, a ; y1 = a - MY_ROR y0, (25-11) ; y0 = e >> (25-11) - - ;vmovdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA} - - xor y0, e ; y0 = e ^ (e >> (25-11)) - MY_ROR y1, (22-13) ; y1 = a >> (22-13) - mov y2, f ; y2 = f - xor y1, a ; y1 = a ^ (a >> (22-13) - MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - - vpsrld XTMP4, XTMP2, 10 ; XTMP4 = W[-2] >> 10 {BBAA} - - xor y2, g ; y2 = f^g - - vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xBxA} - - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - - vpsrlq 
XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xBxA} - - MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - xor y2, g ; y2 = CH = ((f^g)&e)^g - MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - vpxor XTMP2, XTMP2, XTMP3 - add y2, y0 ; y2 = S1 + CH - MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH - vpxor XTMP4, XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - vpshufb XTMP4, XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - vpaddd XTMP0, XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - ;; compute high s1 - vpshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - ;vmovdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC} - mov y0, e ; y0 = e - MY_ROR y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - ;vmovdqa XTMP5, XTMP2 ; XTMP5 = W[-2] {DDCC} - MY_ROR y1, (22-13) ; y1 = a >> (22-13) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - - vpsrld XTMP5, XTMP2, 10 ; XTMP5 = W[-2] >> 10 {DDCC} - - xor y1, a ; y1 = a ^ (a >> (22-13) - xor y2, g ; y2 = f^g - - vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] MY_ROR 19 {xDxC} - - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - - vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] MY_ROR 17 {xDxC} - - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - - vpxor XTMP2, XTMP2, XTMP3 - - MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 3*4] 
; y2 = k + w + S1 + CH - vpxor XTMP5, XTMP5, XTMP2 ; XTMP5 = s1 {xDxC} - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - vpshufb XTMP5, XTMP5, SHUF_DC00 ; XTMP5 = s1 {DC00} - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - vpaddd X0, XTMP5, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS -rotate_Xs -%endm - -;; input is [rsp + _XFER + %1 * 4] -%macro DO_ROUND 1 - mov y0, e ; y0 = e - MY_ROR y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - xor y0, e ; y0 = e ^ (e >> (25-11)) - MY_ROR y1, (22-13) ; y1 = a >> (22-13) - mov y2, f ; y2 = f - xor y1, a ; y1 = a ^ (a >> (22-13) - MY_ROR y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - xor y2, g ; y2 = f^g - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - MY_ROR y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - and y2, e ; y2 = (f^g)&e - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - MY_ROR y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - add y2, y0 ; y2 = S1 + CH - MY_ROR y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - ROTATE_ARGS -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; void sha256_avx(void *input_data, UINT32 digest[8], UINT64 num_blks) -;; arg 1 : pointer to input data -;; arg 2 : pointer to digest -;; arg 3 : Num blocks -section .text -global 
sha256_avx -align 32 -sha256_avx: - push rbx -%ifndef LINUX - push rsi - push rdi -%endif - push rbp - push r13 - push r14 - push r15 - - sub rsp,STACK_SIZE -%ifndef LINUX - vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6 - vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7 - vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8 - vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9 - vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10 - vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11 - vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12 - vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13 -%endif - - shl NUM_BLKS, 6 ; convert to bytes - jz done_hash - add NUM_BLKS, INP ; pointer to end of data - mov [rsp + _INP_END], NUM_BLKS - - ;; load initial digest - mov a,[4*0 + CTX] - mov b,[4*1 + CTX] - mov c,[4*2 + CTX] - mov d,[4*3 + CTX] - mov e,[4*4 + CTX] - mov f,[4*5 + CTX] - mov g,[4*6 + CTX] - mov h,[4*7 + CTX] - - vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] - vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] - -loop0: - lea TBL,[K256 wrt rip] - - ;; byte swap first 16 dwords - COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK - - mov [rsp + _INP], INP - - ;; schedule 48 input dwords, by doing 3 rounds of 16 each - mov SRND, 3 -align 16 -loop1: - vpaddd XFER, X0, [TBL + 0*16] - vmovdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - vpaddd XFER, X0, [TBL + 1*16] - vmovdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - vpaddd XFER, X0, [TBL + 2*16] - vmovdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - vpaddd XFER, X0, [TBL + 3*16] - vmovdqa [rsp + _XFER], XFER - add TBL, 4*16 - FOUR_ROUNDS_AND_SCHED - - sub SRND, 1 - jne loop1 - - mov SRND, 2 -loop2: - vpaddd XFER, X0, [TBL + 0*16] - vmovdqa [rsp + _XFER], XFER - DO_ROUND 0 - DO_ROUND 1 - DO_ROUND 2 - DO_ROUND 3 - - vpaddd XFER, X1, [TBL + 1*16] - vmovdqa [rsp + _XFER], XFER - add TBL, 2*16 - DO_ROUND 0 - 
DO_ROUND 1 - DO_ROUND 2 - DO_ROUND 3 - - vmovdqa X0, X2 - vmovdqa X1, X3 - - sub SRND, 1 - jne loop2 - - - addm [4*0 + CTX],a - addm [4*1 + CTX],b - addm [4*2 + CTX],c - addm [4*3 + CTX],d - addm [4*4 + CTX],e - addm [4*5 + CTX],f - addm [4*6 + CTX],g - addm [4*7 + CTX],h - - mov INP, [rsp + _INP] - add INP, 64 - cmp INP, [rsp + _INP_END] - jne loop0 - -done_hash: -%ifndef LINUX - vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16] - vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16] - vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16] - vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16] - vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16] - vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16] - vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16] - vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16] -%endif - - - add rsp, STACK_SIZE - - pop r15 - pop r14 - pop r13 - pop rbp -%ifndef LINUX - pop rdi - pop rsi -%endif - pop rbx - - ret - - -section .data -align 64 -K256: - dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -PSHUFFLE_BYTE_FLIP_MASK: ddq 0x0c0d0e0f08090a0b0405060700010203 - -; shuffle xBxA -> 00BA -_SHUF_00BA: ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100 - -; shuffle xDxC -> DC00 -_SHUF_DC00: ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF - -section .note.GNU-stack noalloc noexec nowrite progbits diff --git 
a/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm b/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm deleted file mode 100644 index 226867b..0000000 --- a/solo-ckpool-source/src/sha256_code_release/sha256_avx2_rorx2.asm +++ /dev/null @@ -1,828 +0,0 @@ -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; Copyright (c) 2012, Intel Corporation -; -; All rights reserved. -; -; Redistribution and use in source and binary forms, with or without -; modification, are permitted provided that the following conditions are -; met: -; -; * Redistributions of source code must retain the above copyright -; notice, this list of conditions and the following disclaimer. -; -; * Redistributions in binary form must reproduce the above copyright -; notice, this list of conditions and the following disclaimer in the -; documentation and/or other materials provided with the -; distribution. -; -; * Neither the name of the Intel Corporation nor the names of its -; contributors may be used to endorse or promote products derived from -; this software without specific prior written permission. -; -; -; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY -; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR -; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; Example YASM command lines: -; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_avx2_rorx2.obj -g cv8 sha256_avx2_rorx2.asm -; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_avx2_rorx2.o sha256_avx2_rorx2.asm -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; This code is described in an Intel White-Paper: -; "Fast SHA-256 Implementations on Intel Architecture Processors" -; -; To find it, surf to http://www.intel.com/p/en_US/embedded -; and search for that title. -; The paper is expected to be released roughly at the end of April, 2012 -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; This code schedules 2 blocks at a time, with 4 lanes per block -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define VMOVDQ vmovdqu ;; assume buffers not aligned - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros - -; addm [mem], reg -; Add reg to mem using reg-mem add and store -%macro addm 2 - add %2, %1 - mov %1, %2 -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define X0 ymm4 -%define X1 ymm5 -%define X2 ymm6 -%define X3 ymm7 - -; XMM versions of above -%define XWORD0 xmm4 -%define XWORD1 xmm5 -%define XWORD2 xmm6 -%define XWORD3 xmm7 - -%define XTMP0 ymm0 -%define XTMP1 ymm1 -%define XTMP2 ymm2 -%define XTMP3 ymm3 -%define XTMP4 ymm8 -%define XFER ymm9 -%define XTMP5 ymm11 - -%define SHUF_00BA ymm10 ; shuffle xBxA -> 00BA -%define SHUF_DC00 ymm12 ; shuffle xDxC -> DC00 -%define BYTE_FLIP_MASK ymm13 - -%define X_BYTE_FLIP_MASK xmm13 ; XMM version of BYTE_FLIP_MASK - -%ifdef LINUX -%define NUM_BLKS rdx ; 3rd arg -%define CTX rsi ; 2nd arg -%define INP rdi ; 1st arg -%define c ecx -%define d r8d -%define e edx ; clobbers NUM_BLKS -%define y3 edi ; clobbers INP -%else -%define NUM_BLKS r8 ; 3rd arg -%define CTX rdx ; 2nd arg -%define INP rcx ; 1st arg -%define c edi -%define d esi -%define e r8d ; clobbers NUM_BLKS -%define 
y3 ecx ; clobbers INP - -%endif - - -%define TBL rbp -%define SRND CTX ; SRND is same register as CTX - -%define a eax -%define b ebx -%define f r9d -%define g r10d -%define h r11d -%define old_h r11d - -%define T1 r12d -%define y0 r13d -%define y1 r14d -%define y2 r15d - - -_XFER_SIZE equ 2*64*4 ; 2 blocks, 64 rounds, 4 bytes/round -%ifdef LINUX -_XMM_SAVE_SIZE equ 0 -%else -_XMM_SAVE_SIZE equ 8*16 -%endif -_INP_END_SIZE equ 8 -_INP_SIZE equ 8 -_CTX_SIZE equ 8 -_RSP_SIZE equ 8 - -_XFER equ 0 -_XMM_SAVE equ _XFER + _XFER_SIZE -_INP_END equ _XMM_SAVE + _XMM_SAVE_SIZE -_INP equ _INP_END + _INP_END_SIZE -_CTX equ _INP + _INP_SIZE -_RSP equ _CTX + _CTX_SIZE -STACK_SIZE equ _RSP + _RSP_SIZE - -; rotate_Xs -; Rotate values of symbols X0...X3 -%macro rotate_Xs 0 -%xdefine X_ X0 -%xdefine X0 X1 -%xdefine X1 X2 -%xdefine X2 X3 -%xdefine X3 X_ -%endm - -; ROTATE_ARGS -; Rotate values of symbols a...h -%macro ROTATE_ARGS 0 -%xdefine old_h h -%xdefine TMP_ h -%xdefine h g -%xdefine g f -%xdefine f e -%xdefine e d -%xdefine d c -%xdefine c b -%xdefine b a -%xdefine a TMP_ -%endm - -%macro FOUR_ROUNDS_AND_SCHED 1 -%define %%XFER %1 -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - mov y3, a ; y3 = a ; MAJA - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - - add h, dword[%%XFER+0*4] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - vpalignr XTMP0, X3, X2, 4 ; XTMP0 = W[-7] - mov y2, f ; y2 = f ; CH - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - xor y2, g ; y2 = f^g ; CH - vpaddd XTMP0, XTMP0, X0 ; XTMP0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1 - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - - and y2, e ; y2 = (f^g)&e ; CH - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - add d, h ; d = k + w + h + d ; -- - - and y3, b ; y3 = (a|c)&b ; MAJA - vpalignr XTMP1, X1, X0, 4 ; XTMP1 = W[-15] - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 
2) ; S0 - - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - vpsrld XTMP2, XTMP1, 7 - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and T1, c ; T1 = a&c ; MAJB - - add y2, y0 ; y2 = S1 + CH ; -- - vpslld XTMP3, XTMP1, (32-7) - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - vpor XTMP3, XTMP3, XTMP2 ; XTMP3 = W[-15] ror 7 - - vpsrld XTMP2, XTMP1,18 - add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - add h, y3 ; h = t1 + S0 + MAJ ; -- - - -ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - - mov y3, a ; y3 = a ; MAJA - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - add h, dword[%%XFER+1*4] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - - vpsrld XTMP4, XTMP1, 3 ; XTMP4 = W[-15] >> 3 - mov y2, f ; y2 = f ; CH - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - xor y2, g ; y2 = f^g ; CH - - - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - and y2, e ; y2 = (f^g)&e ; CH - add d, h ; d = k + w + h + d ; -- - - vpslld XTMP1, XTMP1, (32-18) - and y3, b ; y3 = (a|c)&b ; MAJA - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - - vpxor XTMP3, XTMP3, XTMP1 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - - vpxor XTMP3, XTMP3, XTMP2 ; XTMP3 = W[-15] ror 7 ^ W[-15] ror 18 - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - - vpxor XTMP1, XTMP3, XTMP4 ; XTMP1 = s0 - vpshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - vpaddd XTMP0, XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - add h, y2 ; h = k + w + h + S0 + S1 + CH 
= t1 + S0; -- - add h, y3 ; h = t1 + S0 + MAJ ; -- - - vpsrld XTMP4, XTMP2, 10 ; XTMP4 = W[-2] >> 10 {BBAA} - - -ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - mov y3, a ; y3 = a ; MAJA - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - add h, [%%XFER+2*4] ; h = k + w + h ; -- - - vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] ror 19 {xBxA} - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - or y3, c ; y3 = a|c ; MAJA - mov y2, f ; y2 = f ; CH - xor y2, g ; y2 = f^g ; CH - - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA} - and y2, e ; y2 = (f^g)&e ; CH - - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - vpxor XTMP2, XTMP2, XTMP3 - add d, h ; d = k + w + h + d ; -- - and y3, b ; y3 = (a|c)&b ; MAJA - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - vpxor XTMP4, XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - - vpshufb XTMP4, XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - vpaddd XTMP0, XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - vpshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} - - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - - add h, y3 ; h = t1 + S0 + MAJ ; -- - - -ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - mov y3, a ; y3 = a ; MAJA - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - add h, dword[%%XFER+3*4] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - - vpsrld XTMP5, XTMP2, 10 ; XTMP5 = W[-2] >> 10 {DDCC} - mov y2, f ; y2 = f ; CH - rorx T1, a, 13 
; T1 = a >> 13 ; S0B - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - xor y2, g ; y2 = f^g ; CH - - - vpsrlq XTMP3, XTMP2, 19 ; XTMP3 = W[-2] ror 19 {xDxC} - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - and y2, e ; y2 = (f^g)&e ; CH - add d, h ; d = k + w + h + d ; -- - and y3, b ; y3 = (a|c)&b ; MAJA - - vpsrlq XTMP2, XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC} - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - - vpxor XTMP2, XTMP2, XTMP3 - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - add y2, y0 ; y2 = S1 + CH ; -- - - vpxor XTMP5, XTMP5, XTMP2 ; XTMP5 = s1 {xDxC} - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - vpshufb XTMP5, XTMP5, SHUF_DC00 ; XTMP5 = s1 {DC00} - - vpaddd X0, XTMP5, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and T1, c ; T1 = a&c ; MAJB - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - - add h, y1 ; h = k + w + h + S0 ; -- - add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - add h, y3 ; h = t1 + S0 + MAJ ; -- - -ROTATE_ARGS -rotate_Xs -%endm - -%macro DO_4ROUNDS 1 -%define %%XFER %1 -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 0 ;;;;;;;;;;;;;;;;;;;;;;;;;;; - - mov y2, f ; y2 = f ; CH - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - xor y2, g ; y2 = f^g ; CH - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - and y2, e ; y2 = (f^g)&e ; CH - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - mov y3, a ; y3 = a ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - add h, dword[%%XFER + 4*0] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and y3, b ; y3 = (a|c)&b 
; MAJA - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - - - add d, h ; d = k + w + h + d ; -- - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - - - ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - - ;add h, y3 ; h = t1 + S0 + MAJ ; -- - - ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 1 ;;;;;;;;;;;;;;;;;;;;;;;;;;; - - add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - mov y2, f ; y2 = f ; CH - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - xor y2, g ; y2 = f^g ; CH - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - and y2, e ; y2 = (f^g)&e ; CH - add old_h, y3 ; h = t1 + S0 + MAJ ; -- - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - mov y3, a ; y3 = a ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - add h, dword[%%XFER + 4*1] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and y3, b ; y3 = (a|c)&b ; MAJA - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - - - add d, h ; d = k + w + h + d ; -- - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - - - ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - - ;add h, y3 ; h = t1 + S0 + MAJ ; -- - - ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 2 ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - - add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - mov y2, f ; y2 = f ; CH - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - xor y2, g ; y2 = f^g ; CH - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - and y2, 
e ; y2 = (f^g)&e ; CH - add old_h, y3 ; h = t1 + S0 + MAJ ; -- - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - mov y3, a ; y3 = a ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - add h, dword[%%XFER + 4*2] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and y3, b ; y3 = (a|c)&b ; MAJA - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - - - add d, h ; d = k + w + h + d ; -- - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - add d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - - - ;add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - - ;add h, y3 ; h = t1 + S0 + MAJ ; -- - - ROTATE_ARGS - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND N + 3 ;;;;;;;;;;;;;;;;;;;;;;;;;;; - - add old_h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - mov y2, f ; y2 = f ; CH - rorx y0, e, 25 ; y0 = e >> 25 ; S1A - rorx y1, e, 11 ; y1 = e >> 11 ; S1B - xor y2, g ; y2 = f^g ; CH - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ; S1 - rorx y1, e, 6 ; y1 = (e >> 6) ; S1 - and y2, e ; y2 = (f^g)&e ; CH - add old_h, y3 ; h = t1 + S0 + MAJ ; -- - - xor y0, y1 ; y0 = (e>>25) ^ (e>>11) ^ (e>>6) ; S1 - rorx T1, a, 13 ; T1 = a >> 13 ; S0B - xor y2, g ; y2 = CH = ((f^g)&e)^g ; CH - rorx y1, a, 22 ; y1 = a >> 22 ; S0A - mov y3, a ; y3 = a ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ; S0 - rorx T1, a, 2 ; T1 = (a >> 2) ; S0 - add h, dword[%%XFER + 4*3] ; h = k + w + h ; -- - or y3, c ; y3 = a|c ; MAJA - - xor y1, T1 ; y1 = (a>>22) ^ (a>>13) ^ (a>>2) ; S0 - mov T1, a ; T1 = a ; MAJB - and y3, b ; y3 = (a|c)&b ; MAJA - and T1, c ; T1 = a&c ; MAJB - add y2, y0 ; y2 = S1 + CH ; -- - - - add d, h ; d = k + w + h + d ; -- - or y3, T1 ; y3 = MAJ = (a|c)&b)|(a&c) ; MAJ - add h, y1 ; h = k + w + h + S0 ; -- - - add 
d, y2 ; d = k + w + h + d + S1 + CH = d + t1 ; -- - - - add h, y2 ; h = k + w + h + S0 + S1 + CH = t1 + S0; -- - - add h, y3 ; h = t1 + S0 + MAJ ; -- - - ROTATE_ARGS - -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;; void sha256_rorx(void *input_data, UINT32 digest[8], UINT64 num_blks) -;; arg 1 : pointer to input data -;; arg 2 : pointer to digest -;; arg 3 : Num blocks -section .text -global sha256_rorx -align 32 -sha256_rorx: - push rbx -%ifndef LINUX - push rsi - push rdi -%endif - push rbp - push r12 - push r13 - push r14 - push r15 - - mov rax, rsp - sub rsp,STACK_SIZE - and rsp, -32 - mov [rsp + _RSP], rax - -%ifndef LINUX - vmovdqa [rsp + _XMM_SAVE + 0*16],xmm6 - vmovdqa [rsp + _XMM_SAVE + 1*16],xmm7 - vmovdqa [rsp + _XMM_SAVE + 2*16],xmm8 - vmovdqa [rsp + _XMM_SAVE + 3*16],xmm9 - vmovdqa [rsp + _XMM_SAVE + 4*16],xmm10 - vmovdqa [rsp + _XMM_SAVE + 5*16],xmm11 - vmovdqa [rsp + _XMM_SAVE + 6*16],xmm12 - vmovdqa [rsp + _XMM_SAVE + 7*16],xmm13 -%endif - - shl NUM_BLKS, 6 ; convert to bytes - jz done_hash - lea NUM_BLKS, [NUM_BLKS + INP - 64] ; pointer to last block - mov [rsp + _INP_END], NUM_BLKS - - cmp INP, NUM_BLKS - je only_one_block - - ;; load initial digest - mov a,[4*0 + CTX] - mov b,[4*1 + CTX] - mov c,[4*2 + CTX] - mov d,[4*3 + CTX] - mov e,[4*4 + CTX] - mov f,[4*5 + CTX] - mov g,[4*6 + CTX] - mov h,[4*7 + CTX] - - vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] - vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] - - mov [rsp + _CTX], CTX - -loop0: - lea TBL,[K256 wrt rip] - - ;; Load first 16 dwords from two blocks - VMOVDQ XTMP0, [INP + 0*32] - VMOVDQ XTMP1, [INP + 1*32] - VMOVDQ XTMP2, [INP + 2*32] - VMOVDQ XTMP3, [INP + 3*32] - - ;; byte swap data - vpshufb XTMP0, XTMP0, BYTE_FLIP_MASK - vpshufb XTMP1, XTMP1, BYTE_FLIP_MASK - vpshufb XTMP2, XTMP2, BYTE_FLIP_MASK - vpshufb XTMP3, XTMP3, 
BYTE_FLIP_MASK - - ;; transpose data into high/low halves - vperm2i128 X0, XTMP0, XTMP2, 0x20 - vperm2i128 X1, XTMP0, XTMP2, 0x31 - vperm2i128 X2, XTMP1, XTMP3, 0x20 - vperm2i128 X3, XTMP1, XTMP3, 0x31 - -last_block_enter: - add INP, 64 - mov [rsp + _INP], INP - - ;; schedule 48 input dwords, by doing 3 rounds of 12 each - xor SRND, SRND - -align 16 -loop1: - vpaddd XFER, X0, [TBL + SRND + 0*32] - vmovdqa [rsp + _XFER + SRND + 0*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 0*32 - - vpaddd XFER, X0, [TBL + SRND + 1*32] - vmovdqa [rsp + _XFER + SRND + 1*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 1*32 - - vpaddd XFER, X0, [TBL + SRND + 2*32] - vmovdqa [rsp + _XFER + SRND + 2*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 2*32 - - vpaddd XFER, X0, [TBL + SRND + 3*32] - vmovdqa [rsp + _XFER + SRND + 3*32], XFER - FOUR_ROUNDS_AND_SCHED rsp + _XFER + SRND + 3*32 - - add SRND, 4*32 - cmp SRND, 3 * 4*32 - jb loop1 - -loop2: - ;; Do last 16 rounds with no scheduling - vpaddd XFER, X0, [TBL + SRND + 0*32] - vmovdqa [rsp + _XFER + SRND + 0*32], XFER - DO_4ROUNDS rsp + _XFER + SRND + 0*32 - vpaddd XFER, X1, [TBL + SRND + 1*32] - vmovdqa [rsp + _XFER + SRND + 1*32], XFER - DO_4ROUNDS rsp + _XFER + SRND + 1*32 - add SRND, 2*32 - - vmovdqa X0, X2 - vmovdqa X1, X3 - - cmp SRND, 4 * 4*32 - jb loop2 - - mov CTX, [rsp + _CTX] - mov INP, [rsp + _INP] - - addm [4*0 + CTX],a - addm [4*1 + CTX],b - addm [4*2 + CTX],c - addm [4*3 + CTX],d - addm [4*4 + CTX],e - addm [4*5 + CTX],f - addm [4*6 + CTX],g - addm [4*7 + CTX],h - - cmp INP, [rsp + _INP_END] - ja done_hash - - ;;;; Do second block using previously scheduled results - xor SRND, SRND -align 16 -loop3: - DO_4ROUNDS rsp + _XFER + SRND + 0*32 + 16 - DO_4ROUNDS rsp + _XFER + SRND + 1*32 + 16 - add SRND, 2*32 - cmp SRND, 4 * 4*32 - jb loop3 - - mov CTX, [rsp + _CTX] - mov INP, [rsp + _INP] - add INP, 64 - - addm [4*0 + CTX],a - addm [4*1 + CTX],b - addm [4*2 + CTX],c - addm [4*3 + CTX],d - addm [4*4 + CTX],e 
- addm [4*5 + CTX],f - addm [4*6 + CTX],g - addm [4*7 + CTX],h - - cmp INP, [rsp + _INP_END] - jb loop0 - ja done_hash - -do_last_block: - ;;;; do last block - lea TBL,[K256 wrt rip] - - VMOVDQ XWORD0, [INP + 0*16] - VMOVDQ XWORD1, [INP + 1*16] - VMOVDQ XWORD2, [INP + 2*16] - VMOVDQ XWORD3, [INP + 3*16] - - vpshufb XWORD0, XWORD0, X_BYTE_FLIP_MASK - vpshufb XWORD1, XWORD1, X_BYTE_FLIP_MASK - vpshufb XWORD2, XWORD2, X_BYTE_FLIP_MASK - vpshufb XWORD3, XWORD3, X_BYTE_FLIP_MASK - - jmp last_block_enter - -only_one_block: - - ;; load initial digest - mov a,[4*0 + CTX] - mov b,[4*1 + CTX] - mov c,[4*2 + CTX] - mov d,[4*3 + CTX] - mov e,[4*4 + CTX] - mov f,[4*5 + CTX] - mov g,[4*6 + CTX] - mov h,[4*7 + CTX] - - vmovdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - vmovdqa SHUF_00BA, [_SHUF_00BA wrt rip] - vmovdqa SHUF_DC00, [_SHUF_DC00 wrt rip] - - mov [rsp + _CTX], CTX - jmp do_last_block - -done_hash: -%ifndef LINUX - vmovdqa xmm6,[rsp + _XMM_SAVE + 0*16] - vmovdqa xmm7,[rsp + _XMM_SAVE + 1*16] - vmovdqa xmm8,[rsp + _XMM_SAVE + 2*16] - vmovdqa xmm9,[rsp + _XMM_SAVE + 3*16] - vmovdqa xmm10,[rsp + _XMM_SAVE + 4*16] - vmovdqa xmm11,[rsp + _XMM_SAVE + 5*16] - vmovdqa xmm12,[rsp + _XMM_SAVE + 6*16] - vmovdqa xmm13,[rsp + _XMM_SAVE + 7*16] -%endif - - mov rsp, [rsp + _RSP] - - pop r15 - pop r14 - pop r13 - pop r12 - pop rbp -%ifndef LINUX - pop rdi - pop rsi -%endif - pop rbx - - ret - -section .data -align 64 -K256: - dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - dd 
0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -PSHUFFLE_BYTE_FLIP_MASK: - ddq 0x0c0d0e0f08090a0b0405060700010203,0x0c0d0e0f08090a0b0405060700010203 - -; shuffle xBxA -> 00BA -_SHUF_00BA: - ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100,0xFFFFFFFFFFFFFFFF0b0a090803020100 - -; shuffle xDxC -> DC00 -_SHUF_DC00: - ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF,0x0b0a090803020100FFFFFFFFFFFFFFFF - -section .note.GNU-stack noalloc noexec nowrite progbits diff --git a/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm b/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm deleted file mode 100644 index 2d828e1..0000000 --- a/solo-ckpool-source/src/sha256_code_release/sha256_sse4.asm +++ /dev/null @@ -1,546 +0,0 @@ -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; Copyright (c) 2012, Intel Corporation -; -; All rights reserved. 
-; -; Redistribution and use in source and binary forms, with or without -; modification, are permitted provided that the following conditions are -; met: -; -; * Redistributions of source code must retain the above copyright -; notice, this list of conditions and the following disclaimer. -; -; * Redistributions in binary form must reproduce the above copyright -; notice, this list of conditions and the following disclaimer in the -; documentation and/or other materials provided with the -; distribution. -; -; * Neither the name of the Intel Corporation nor the names of its -; contributors may be used to endorse or promote products derived from -; this software without specific prior written permission. -; -; -; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY -; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR -; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; Example YASM command lines: -; Windows: yasm -Xvc -f x64 -rnasm -pnasm -o sha256_sse4.obj -g cv8 sha256_sse4.asm -; Linux: yasm -f x64 -f elf64 -X gnu -g dwarf2 -D LINUX -o sha256_sse4.o sha256_sse4.asm -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; -; This code is described in an Intel White-Paper: -; "Fast SHA-256 Implementations on Intel Architecture Processors" -; -; To find it, surf to http://www.intel.com/p/en_US/embedded -; and search for that title. -; The paper is expected to be released roughly at the end of April, 2012 -; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -; This code schedules 1 blocks at a time, with 4 lanes per block -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define MOVDQ movdqu ;; assume buffers not aligned - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; Define Macros - -; addm [mem], reg -; Add reg to mem using reg-mem add and store -%macro addm 2 - add %2, %1 - mov %1, %2 -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -; COPY_XMM_AND_BSWAP xmm, [mem], byte_flip_mask -; Load xmm with mem and byte swap each dword -%macro COPY_XMM_AND_BSWAP 3 - MOVDQ %1, %2 - pshufb %1, %3 -%endmacro - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - -%define X0 xmm4 -%define X1 xmm5 -%define X2 xmm6 -%define X3 xmm7 - -%define XTMP0 xmm0 -%define XTMP1 xmm1 -%define XTMP2 xmm2 -%define XTMP3 xmm3 -%define XTMP4 xmm8 -%define XFER xmm9 - -%define SHUF_00BA xmm10 ; shuffle xBxA -> 00BA -%define SHUF_DC00 xmm11 ; shuffle xDxC -> DC00 -%define BYTE_FLIP_MASK xmm12 - -%ifdef LINUX -%define NUM_BLKS rdx ; 3rd arg -%define CTX rsi ; 2nd arg -%define INP rdi ; 1st arg - -%define SRND rdi ; clobbers INP -%define c ecx -%define d r8d -%define e edx -%else -%define NUM_BLKS r8 ; 3rd arg -%define CTX rdx ; 2nd arg -%define INP rcx ; 1st arg - -%define SRND rcx ; clobbers INP -%define c edi -%define d esi -%define e r8d - -%endif -%define TBL rbp 
-%define a eax -%define b ebx - -%define f r9d -%define g r10d -%define h r11d - -%define y0 r13d -%define y1 r14d -%define y2 r15d - - - -_INP_END_SIZE equ 8 -_INP_SIZE equ 8 -_XFER_SIZE equ 8 -%ifdef LINUX -_XMM_SAVE_SIZE equ 0 -%else -_XMM_SAVE_SIZE equ 7*16 -%endif -; STACK_SIZE plus pushes must be an odd multiple of 8 -_ALIGN_SIZE equ 8 - -_INP_END equ 0 -_INP equ _INP_END + _INP_END_SIZE -_XFER equ _INP + _INP_SIZE -_XMM_SAVE equ _XFER + _XFER_SIZE + _ALIGN_SIZE -STACK_SIZE equ _XMM_SAVE + _XMM_SAVE_SIZE - -; rotate_Xs -; Rotate values of symbols X0...X3 -%macro rotate_Xs 0 -%xdefine X_ X0 -%xdefine X0 X1 -%xdefine X1 X2 -%xdefine X2 X3 -%xdefine X3 X_ -%endm - -; ROTATE_ARGS -; Rotate values of symbols a...h -%macro ROTATE_ARGS 0 -%xdefine TMP_ h -%xdefine h g -%xdefine g f -%xdefine f e -%xdefine e d -%xdefine d c -%xdefine c b -%xdefine b a -%xdefine a TMP_ -%endm - -%macro FOUR_ROUNDS_AND_SCHED 0 - ;; compute s0 four at a time and s1 two at a time - ;; compute W[-16] + W[-7] 4 at a time - movdqa XTMP0, X3 - mov y0, e ; y0 = e - ror y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - palignr XTMP0, X2, 4 ; XTMP0 = W[-7] - ror y1, (22-13) ; y1 = a >> (22-13) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - movdqa XTMP1, X1 - xor y1, a ; y1 = a ^ (a >> (22-13) - xor y2, g ; y2 = f^g - paddd XTMP0, X0 ; XTMP0 = W[-7] + W[-16] - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - ;; compute s0 - palignr XTMP1, X0, 4 ; XTMP1 = W[-15] - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - movdqa XTMP2, XTMP1 ; XTMP2 = W[-15] - ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 0*4] ; y2 = k + w + S1 + CH - movdqa XTMP3, XTMP1 ; XTMP3 = W[-15] - mov y0, a ; y0 = a - add h, y2 ; h 
= h + S1 + CH + k + w - mov y2, a ; y2 = a - pslld XTMP1, (32-7) - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - psrld XTMP2, 7 - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - por XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - movdqa XTMP2, XTMP3 ; XTMP2 = W[-15] - mov y0, e ; y0 = e - mov y1, a ; y1 = a - movdqa XTMP4, XTMP3 ; XTMP4 = W[-15] - ror y0, (25-11) ; y0 = e >> (25-11) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - ror y1, (22-13) ; y1 = a >> (22-13) - pslld XTMP3, (32-18) - xor y1, a ; y1 = a ^ (a >> (22-13) - ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - xor y2, g ; y2 = f^g - psrld XTMP2, 18 - ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - pxor XTMP1, XTMP3 - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - xor y2, g ; y2 = CH = ((f^g)&e)^g - psrld XTMP4, 3 ; XTMP4 = W[-15] >> 3 - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 1*4] ; y2 = k + w + S1 + CH - ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - pxor XTMP1, XTMP2 ; XTMP1 = W[-15] ror 7 ^ W[-15] ror 18 - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - pxor XTMP1, XTMP4 ; XTMP1 = s0 - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - ;; compute low s1 - pshufd XTMP2, X3, 11111010b ; XTMP2 = W[-2] {BBAA} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - paddd XTMP0, XTMP1 ; XTMP0 = W[-16] + W[-7] + s0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {BBAA} - mov y0, e ; y0 = e - mov y1, a ; y1 = a - ror y0, (25-11) ; y0 = e >> (25-11) - movdqa XTMP4, XTMP2 ; XTMP4 = W[-2] {BBAA} - xor y0, e ; 
y0 = e ^ (e >> (25-11)) - ror y1, (22-13) ; y1 = a >> (22-13) - mov y2, f ; y2 = f - xor y1, a ; y1 = a ^ (a >> (22-13) - ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xBxA} - xor y2, g ; y2 = f^g - psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xBxA} - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - psrld XTMP4, 10 ; XTMP4 = W[-2] >> 10 {BBAA} - ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - xor y2, g ; y2 = CH = ((f^g)&e)^g - ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - pxor XTMP2, XTMP3 - add y2, y0 ; y2 = S1 + CH - ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, [rsp + _XFER + 2*4] ; y2 = k + w + S1 + CH - pxor XTMP4, XTMP2 ; XTMP4 = s1 {xBxA} - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - pshufb XTMP4, SHUF_00BA ; XTMP4 = s1 {00BA} - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - paddd XTMP0, XTMP4 ; XTMP0 = {..., ..., W[1], W[0]} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - ;; compute high s1 - pshufd XTMP2, XTMP0, 01010000b ; XTMP2 = W[-2] {DDCC} - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS - movdqa XTMP3, XTMP2 ; XTMP3 = W[-2] {DDCC} - mov y0, e ; y0 = e - ror y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - movdqa X0, XTMP2 ; X0 = W[-2] {DDCC} - ror y1, (22-13) ; y1 = a >> (22-13) - xor y0, e ; y0 = e ^ (e >> (25-11)) - mov y2, f ; y2 = f - ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - psrlq XTMP2, 17 ; XTMP2 = W[-2] ror 17 {xDxC} - xor y1, a ; y1 = a ^ (a >> (22-13) - xor y2, g ; y2 = f^g - psrlq XTMP3, 19 ; XTMP3 = W[-2] ror 19 {xDxC} - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - and y2, e ; y2 = (f^g)&e - ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - psrld X0, 10 ; X0 = W[-2] >> 10 {DDCC} - xor y1, a ; y1 = a ^ (a >> 
(13-2)) ^ (a >> (22-2)) - ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - pxor XTMP2, XTMP3 - ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, y0 ; y2 = S1 + CH - add y2, [rsp + _XFER + 3*4] ; y2 = k + w + S1 + CH - pxor X0, XTMP2 ; X0 = s1 {xDxC} - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - pshufb X0, SHUF_DC00 ; X0 = s1 {DC00} - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - paddd X0, XTMP0 ; X0 = {W[3], W[2], W[1], W[0]} - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - -ROTATE_ARGS -rotate_Xs -%endm - -;; input is [rsp + _XFER + %1 * 4] -%macro DO_ROUND 1 - mov y0, e ; y0 = e - ror y0, (25-11) ; y0 = e >> (25-11) - mov y1, a ; y1 = a - xor y0, e ; y0 = e ^ (e >> (25-11)) - ror y1, (22-13) ; y1 = a >> (22-13) - mov y2, f ; y2 = f - xor y1, a ; y1 = a ^ (a >> (22-13) - ror y0, (11-6) ; y0 = (e >> (11-6)) ^ (e >> (25-6)) - xor y2, g ; y2 = f^g - xor y0, e ; y0 = e ^ (e >> (11-6)) ^ (e >> (25-6)) - ror y1, (13-2) ; y1 = (a >> (13-2)) ^ (a >> (22-2)) - and y2, e ; y2 = (f^g)&e - xor y1, a ; y1 = a ^ (a >> (13-2)) ^ (a >> (22-2)) - ror y0, 6 ; y0 = S1 = (e>>6) & (e>>11) ^ (e>>25) - xor y2, g ; y2 = CH = ((f^g)&e)^g - add y2, y0 ; y2 = S1 + CH - ror y1, 2 ; y1 = S0 = (a>>2) ^ (a>>13) ^ (a>>22) - add y2, [rsp + _XFER + %1 * 4] ; y2 = k + w + S1 + CH - mov y0, a ; y0 = a - add h, y2 ; h = h + S1 + CH + k + w - mov y2, a ; y2 = a - or y0, c ; y0 = a|c - add d, h ; d = d + h + S1 + CH + k + w - and y2, c ; y2 = a&c - and y0, b ; y0 = (a|c)&b - add h, y1 ; h = h + S1 + CH + k + w + S0 - or y0, y2 ; y0 = MAJ = (a|c)&b)|(a&c) - add h, y0 ; h = h + S1 + CH + k + w + S0 + MAJ - ROTATE_ARGS -%endm - -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; 
-;; void sha256_sse4(void *input_data, UINT32 digest[8], UINT64 num_blks) -;; arg 1 : pointer to input data -;; arg 2 : pointer to digest -;; arg 3 : Num blocks -section .text -global sha256_sse4 -align 32 -sha256_sse4: - push rbx -%ifndef LINUX - push rsi - push rdi -%endif - push rbp - push r13 - push r14 - push r15 - - sub rsp,STACK_SIZE -%ifndef LINUX - movdqa [rsp + _XMM_SAVE + 0*16],xmm6 - movdqa [rsp + _XMM_SAVE + 1*16],xmm7 - movdqa [rsp + _XMM_SAVE + 2*16],xmm8 - movdqa [rsp + _XMM_SAVE + 3*16],xmm9 - movdqa [rsp + _XMM_SAVE + 4*16],xmm10 - movdqa [rsp + _XMM_SAVE + 5*16],xmm11 - movdqa [rsp + _XMM_SAVE + 6*16],xmm12 -%endif - - shl NUM_BLKS, 6 ; convert to bytes - jz done_hash - add NUM_BLKS, INP ; pointer to end of data - mov [rsp + _INP_END], NUM_BLKS - - ;; load initial digest - mov a,[4*0 + CTX] - mov b,[4*1 + CTX] - mov c,[4*2 + CTX] - mov d,[4*3 + CTX] - mov e,[4*4 + CTX] - mov f,[4*5 + CTX] - mov g,[4*6 + CTX] - mov h,[4*7 + CTX] - - movdqa BYTE_FLIP_MASK, [PSHUFFLE_BYTE_FLIP_MASK wrt rip] - movdqa SHUF_00BA, [_SHUF_00BA wrt rip] - movdqa SHUF_DC00, [_SHUF_DC00 wrt rip] - -loop0: - lea TBL,[K256 wrt rip] - - ;; byte swap first 16 dwords - COPY_XMM_AND_BSWAP X0, [INP + 0*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X1, [INP + 1*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X2, [INP + 2*16], BYTE_FLIP_MASK - COPY_XMM_AND_BSWAP X3, [INP + 3*16], BYTE_FLIP_MASK - - mov [rsp + _INP], INP - - ;; schedule 48 input dwords, by doing 3 rounds of 16 each - mov SRND, 3 -align 16 -loop1: - movdqa XFER, [TBL + 0*16] - paddd XFER, X0 - movdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - movdqa XFER, [TBL + 1*16] - paddd XFER, X0 - movdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - movdqa XFER, [TBL + 2*16] - paddd XFER, X0 - movdqa [rsp + _XFER], XFER - FOUR_ROUNDS_AND_SCHED - - movdqa XFER, [TBL + 3*16] - paddd XFER, X0 - movdqa [rsp + _XFER], XFER - add TBL, 4*16 - FOUR_ROUNDS_AND_SCHED - - sub SRND, 1 - jne loop1 - - mov SRND, 2 -loop2: - paddd X0, [TBL + 0*16] 
- movdqa [rsp + _XFER], X0 - DO_ROUND 0 - DO_ROUND 1 - DO_ROUND 2 - DO_ROUND 3 - paddd X1, [TBL + 1*16] - movdqa [rsp + _XFER], X1 - add TBL, 2*16 - DO_ROUND 0 - DO_ROUND 1 - DO_ROUND 2 - DO_ROUND 3 - - movdqa X0, X2 - movdqa X1, X3 - - sub SRND, 1 - jne loop2 - - addm [4*0 + CTX],a - addm [4*1 + CTX],b - addm [4*2 + CTX],c - addm [4*3 + CTX],d - addm [4*4 + CTX],e - addm [4*5 + CTX],f - addm [4*6 + CTX],g - addm [4*7 + CTX],h - - mov INP, [rsp + _INP] - add INP, 64 - cmp INP, [rsp + _INP_END] - jne loop0 - -done_hash: -%ifndef LINUX - movdqa xmm6,[rsp + _XMM_SAVE + 0*16] - movdqa xmm7,[rsp + _XMM_SAVE + 1*16] - movdqa xmm8,[rsp + _XMM_SAVE + 2*16] - movdqa xmm9,[rsp + _XMM_SAVE + 3*16] - movdqa xmm10,[rsp + _XMM_SAVE + 4*16] - movdqa xmm11,[rsp + _XMM_SAVE + 5*16] - movdqa xmm12,[rsp + _XMM_SAVE + 6*16] -%endif - - add rsp, STACK_SIZE - - pop r15 - pop r14 - pop r13 - pop rbp -%ifndef LINUX - pop rdi - pop rsi -%endif - pop rbx - - ret - - -section .data -align 64 -K256: - dd 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5 - dd 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5 - dd 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3 - dd 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174 - dd 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc - dd 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da - dd 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7 - dd 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967 - dd 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13 - dd 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85 - dd 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3 - dd 0xd192e819,0xd6990624,0xf40e3585,0x106aa070 - dd 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5 - dd 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3 - dd 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208 - dd 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2 - -PSHUFFLE_BYTE_FLIP_MASK: ddq 0x0c0d0e0f08090a0b0405060700010203 - -; shuffle xBxA -> 00BA -_SHUF_00BA: ddq 0xFFFFFFFFFFFFFFFF0b0a090803020100 - -; shuffle xDxC -> DC00 -_SHUF_DC00: ddq 0x0b0a090803020100FFFFFFFFFFFFFFFF - 
-section .note.GNU-stack noalloc noexec nowrite progbits diff --git a/solo-ckpool-source/src/stratifier.c b/solo-ckpool-source/src/stratifier.c deleted file mode 100644 index 0d91852..0000000 --- a/solo-ckpool-source/src/stratifier.c +++ /dev/null @@ -1,8617 +0,0 @@ -/* - * Copyright 2014-2020,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#include "config.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef HAVE_ZMQ_H -#include -#endif - -#include "ckpool.h" -#include "libckpool.h" -#include "bitcoin.h" -#include "sha2.h" -#include "stratifier.h" -#include "uthash.h" -#include "utlist.h" -#include "connector.h" -#include "generator.h" - -/* Consistent across all pool instances */ -static const char *workpadding = "000000800000000000000000000000000000000000000000000000000000000000000000000000000000000080020000"; -static const char *scriptsig_header = "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff"; -static uchar scriptsig_header_bin[41]; -static const double nonces = 4294967296; - -/* Add unaccounted shares when they arrive, remove them with each update of - * rolling stats. 
*/ -struct pool_stats { - tv_t start_time; - ts_t last_update; - - int workers; - int users; - int disconnected; - - int remote_workers; - int remote_users; - - /* Absolute shares stats */ - int64_t unaccounted_shares; - int64_t accounted_shares; - - /* Cycle of 32 to determine which users to dump stats on */ - uint8_t userstats_cycle; - - /* Shares per second for 1/5/15/60 minute rolling averages */ - double sps1; - double sps5; - double sps15; - double sps60; - - /* Diff shares stats */ - int64_t unaccounted_diff_shares; - int64_t accounted_diff_shares; - int64_t unaccounted_rejects; - int64_t accounted_rejects; - - /* Diff shares per second for 1/5/15... minute rolling averages */ - double dsps1; - double dsps5; - double dsps15; - double dsps60; - double dsps360; - double dsps1440; - double dsps10080; - - double network_diff; - double best_diff; -}; - -typedef struct pool_stats pool_stats_t; - -typedef struct genwork workbase_t; - -struct json_params { - json_t *method; - json_t *params; - json_t *id_val; - int64_t client_id; -}; - -typedef struct json_params json_params_t; - -/* Stratum json messages with their associated client id */ -struct smsg { - json_t *json_msg; - int64_t client_id; -}; - -typedef struct smsg smsg_t; - -struct userwb { - UT_hash_handle hh; - int64_t id; - - uchar *coinb2bin; // Coinb2 cointaining this user's address for generation - char *coinb2; - int coinb2len; // Length of user coinb2 -}; - -struct user_instance; -struct worker_instance; -struct stratum_instance; - -typedef struct user_instance user_instance_t; -typedef struct worker_instance worker_instance_t; -typedef struct stratum_instance stratum_instance_t; - -struct user_instance { - UT_hash_handle hh; - char username[128]; - int id; - char *secondaryuserid; - bool btcaddress; - bool script; - bool segwit; - - /* A linked list of all connected instances of this user */ - stratum_instance_t *clients; - - /* A linked list of all connected workers of this user */ - 
worker_instance_t *worker_instances; - - int workers; - int remote_workers; - char txnbin[48]; - int txnlen; - struct userwb *userwbs; /* Protected by instance lock */ - - double best_diff; /* Best share found by this user */ - int64_t best_ever; /* Best share ever found by this user */ - - int64_t shares; - - int64_t uadiff; /* Shares not yet accounted for in hashmeter */ - - double dsps1; /* Diff shares per second, 1 minute rolling average */ - double dsps5; /* ... 5 minute ... */ - double dsps60;/* etc */ - double dsps1440; - double dsps10080; - tv_t last_share; - tv_t last_decay; - - bool authorised; /* Has this username ever been authorised? */ - time_t auth_time; - time_t failed_authtime; /* Last time this username failed to authorise */ - int auth_backoff; /* How long to reject any auth attempts since last failure */ - bool throttled; /* Have we begun rejecting auth attempts */ -}; - -/* Combined data from workers with the same workername */ -struct worker_instance { - user_instance_t *user_instance; - char *workername; - - /* Number of stratum instances attached as this one worker */ - int instance_count; - - worker_instance_t *next; - worker_instance_t *prev; - - int64_t shares; - - int64_t uadiff; /* Shares not yet accounted for in hashmeter */ - - double dsps1; - double dsps5; - double dsps60; - double dsps1440; - double dsps10080; - tv_t last_share; - tv_t last_decay; - time_t start_time; - - double best_diff; /* Best share found by this worker */ - int64_t best_ever; /* Best share ever found by this worker */ - double mindiff; /* User chosen mindiff */ - - bool idle; - bool notified_idle; -}; - -typedef struct stratifier_data sdata_t; - -typedef struct proxy_base proxy_t; - -/* Per client stratum instance == workers */ -struct stratum_instance { - UT_hash_handle hh; - int64_t id; - - /* Virtualid used as unique local id for passthrough clients */ - int64_t virtualid; - - stratum_instance_t *recycled_next; - stratum_instance_t *recycled_prev; - - 
stratum_instance_t *user_next; - stratum_instance_t *user_prev; - - stratum_instance_t *node_next; - stratum_instance_t *node_prev; - - stratum_instance_t *remote_next; - stratum_instance_t *remote_prev; - - /* Descriptive of ID number and passthrough if any */ - char identity[128]; - - /* Reference count for when this instance is used outside of the - * instance_lock */ - int ref; - - char enonce1[36]; /* Fit up to 16 byte binary enonce1 */ - uchar enonce1bin[16]; - char enonce1var[20]; /* Fit up to 8 byte binary enonce1var */ - uint64_t enonce1_64; - int session_id; - - double diff; /* Current diff */ - double old_diff; /* Previous diff */ - int64_t diff_change_job_id; /* Last job_id we changed diff */ - - int64_t uadiff; /* Shares not yet accounted for in hashmeter */ - - double dsps1; /* Diff shares per second, 1 minute rolling average */ - double dsps5; /* ... 5 minute ... */ - double dsps60;/* etc */ - double dsps1440; - double dsps10080; - tv_t ldc; /* Last diff change */ - int ssdc; /* Shares since diff change */ - tv_t first_share; - tv_t last_share; - tv_t last_decay; - time_t first_invalid; /* Time of first invalid in run of non stale rejects */ - time_t upstream_invalid; /* As first_invalid but for upstream responses */ - time_t start_time; - - char address[INET6_ADDRSTRLEN]; - bool node; /* Is this a mining node */ - bool subscribed; - bool authorising; /* In progress, protected by instance_lock */ - bool authorised; - bool dropped; - bool idle; - int reject; /* Indicator that this client is having a run of rejects - * or other problem and should be dropped lazily if - * this is set to 2 */ - - int latency; /* Latency when on a mining node */ - - bool reconnect; /* This client really needs to reconnect */ - time_t reconnect_request; /* The time we sent a reconnect message */ - - user_instance_t *user_instance; - worker_instance_t *worker_instance; - - char *useragent; - char *workername; - char *password; - bool messages; /* Is this a client that 
understands stratum messages */ - int user_id; - int server; /* Which server is this instance bound to */ - - ckpool_t *ckp; - - time_t last_txns; /* Last time this worker requested txn hashes */ - time_t disconnected_time; /* Time this instance disconnected */ - - double suggest_diff; /* Stratum client suggested diff */ - double best_diff; /* Best share found by this instance */ - - sdata_t *sdata; /* Which sdata this client is bound to */ - proxy_t *proxy; /* Proxy this is bound to in proxy mode */ - int proxyid; /* Which proxy id */ - int subproxyid; /* Which subproxy */ - - bool passthrough; /* Is this a passthrough */ - bool trusted; /* Is this a trusted remote server */ - bool remote; /* Is this a remote client on a trusted remote server */ -}; - -struct share { - UT_hash_handle hh; - uchar hash[32]; - int64_t workbase_id; -}; - -typedef struct share share_t; - -struct proxy_base { - UT_hash_handle hh; - UT_hash_handle sh; /* For subproxy hashlist */ - proxy_t *next; /* For retired subproxies */ - proxy_t *prev; - int id; - int subid; - - /* Priority has the user id encoded in the high bits if it's not a - * global proxy. 
*/ - int64_t priority; - - bool global; /* Is this a global proxy */ - int userid; /* Userid for non global proxies */ - - double diff; - - char baseurl[128]; - char url[128]; - char auth[128]; - char pass[128]; - char enonce1[32]; - uchar enonce1bin[16]; - int enonce1constlen; - int enonce1varlen; - - int nonce2len; - int enonce2varlen; - - bool subscribed; - bool notified; - - int64_t clients; /* Incrementing client count */ - int64_t max_clients; /* Maximum number of clients per subproxy */ - int64_t bound_clients; /* Currently actively bound clients */ - int64_t combined_clients; /* Total clients of all subproxies of a parent proxy */ - int64_t headroom; /* Temporary variable when calculating how many more clients can bind */ - - int subproxy_count; /* Number of subproxies */ - proxy_t *parent; /* Parent proxy of each subproxy */ - proxy_t *subproxies; /* Hashlist of subproxies sorted by subid */ - sdata_t *sdata; /* Unique stratifer data for each subproxy */ - bool dead; - bool deleted; -}; - -typedef struct session session_t; - -struct session { - UT_hash_handle hh; - int session_id; - uint64_t enonce1_64; - int64_t client_id; - int userid; - time_t added; - char address[INET6_ADDRSTRLEN]; -}; - -typedef struct txntable txntable_t; - -struct txntable { - UT_hash_handle hh; - int id; - char hash[68]; - char *data; - int refcount; - bool seen; -}; - -#define ID_AUTH 0 -#define ID_WORKINFO 1 -#define ID_AGEWORKINFO 2 -#define ID_SHARES 3 -#define ID_SHAREERR 4 -#define ID_POOLSTATS 5 -#define ID_WORKERSTATS 6 -#define ID_BLOCK 7 -#define ID_ADDRAUTH 8 -#define ID_HEARTBEAT 9 - -struct stratifier_data { - ckpool_t *ckp; - - char txnbin[48]; - int txnlen; - char dontxnbin[48]; - int dontxnlen; - - pool_stats_t stats; - /* Protects changes to pool stats */ - mutex_t stats_lock; - /* Protects changes to unaccounted pool stats */ - mutex_t uastats_lock; - - bool verbose; - - uint64_t enonce1_64; - - /* For protecting the txntable data */ - cklock_t txn_lock; - - /* 
For protecting the hashtable data */ - cklock_t workbase_lock; - - /* For the hashtable of all workbases */ - workbase_t *workbases; - workbase_t *current_workbase; - int workbases_generated; - txntable_t *txns; - int64_t txns_generated; - - /* Workbases from remote trusted servers */ - workbase_t *remote_workbases; - - /* Is this a node and unable to rebuild workinfos due to lack of txns */ - bool wbincomplete; - - /* Semaphore to serialise calls to add_base */ - sem_t update_sem; - /* Time we last sent out a stratum update */ - time_t update_time; - - int64_t workbase_id; - int64_t blockchange_id; - int session_id; - char lasthash[68]; - char lastswaphash[68]; - - ckmsgq_t *updateq; // Generator base work updates - ckmsgq_t *ssends; // Stratum sends - ckmsgq_t *srecvs; // Stratum receives - ckmsgq_t *sshareq; // Stratum share sends - ckmsgq_t *sauthq; // Stratum authorisations - ckmsgq_t *stxnq; // Transaction requests - - int user_instance_id; - - stratum_instance_t *stratum_instances; - stratum_instance_t *recycled_instances; - stratum_instance_t *node_instances; - stratum_instance_t *remote_instances; - - int64_t stratum_generated; - int64_t disconnected_generated; - int64_t userwbs_generated; - session_t *disconnected_sessions; - - user_instance_t *user_instances; - - /* Protects both stratum and user instances */ - cklock_t instance_lock; - - share_t *shares; - mutex_t share_lock; - - int64_t shares_generated; - - int proxy_count; /* Total proxies generated (not necessarily still alive) */ - proxy_t *proxy; /* Current proxy in use */ - proxy_t *proxies; /* Hashlist of all proxies */ - mutex_t proxy_lock; /* Protects all proxy data */ - proxy_t *subproxy; /* Which subproxy this sdata belongs to in proxy mode */ -}; - -typedef struct json_entry json_entry_t; - -struct json_entry { - json_entry_t *next; - json_entry_t *prev; - json_t *val; -}; - -/* Priority levels for generator messages */ -#define GEN_LAX 0 -#define GEN_NORMAL 1 -#define GEN_PRIORITY 2 - -/* 
For storing a set of messages within another lock, allowing us to dump them - * to the log outside of lock */ -static void add_msg_entry(char_entry_t **entries, char **buf) -{ - char_entry_t *entry; - - if (!*buf) - return; - entry = ckalloc(sizeof(char_entry_t)); - entry->buf = *buf; - *buf = NULL; - DL_APPEND(*entries, entry); -} - -static void notice_msg_entries(char_entry_t **entries) -{ - char_entry_t *entry, *tmpentry; - - DL_FOREACH_SAFE(*entries, entry, tmpentry) { - DL_DELETE(*entries, entry); - LOGNOTICE("%s", entry->buf); - free(entry->buf); - free(entry); - } -} - -static void info_msg_entries(char_entry_t **entries) -{ - char_entry_t *entry, *tmpentry; - - DL_FOREACH_SAFE(*entries, entry, tmpentry) { - DL_DELETE(*entries, entry); - LOGINFO("%s", entry->buf); - free(entry->buf); - free(entry); - } -} - -static const int witnessdata_size = 36; // commitment header + hash - -static void generate_coinbase(ckpool_t *ckp, workbase_t *wb) -{ - uint64_t *u64, g64, d64 = 0; - sdata_t *sdata = ckp->sdata; - char header[272]; - int len, ofs = 0; - ts_t now; - - /* Set fixed length coinb1 arrays to be more than enough */ - wb->coinb1 = ckzalloc(256); - wb->coinb1bin = ckzalloc(128); - - /* Strings in wb should have been zero memset prior. 
Generate binary - * templates first, then convert to hex */ - memcpy(wb->coinb1bin, scriptsig_header_bin, 41); - ofs += 41; // Fixed header length; - - ofs++; // Script length is filled in at the end @wb->coinb1bin[41]; - - /* Put block height at start of template */ - len = ser_number(wb->coinb1bin + ofs, wb->height); - ofs += len; - - /* Followed by flag */ - len = strlen(wb->flags) / 2; - wb->coinb1bin[ofs++] = len; - hex2bin(wb->coinb1bin + ofs, wb->flags, len); - ofs += len; - - /* Followed by timestamp */ - ts_realtime(&now); - len = ser_number(wb->coinb1bin + ofs, now.tv_sec); - ofs += len; - - /* Followed by our unique randomiser based on the nsec timestamp */ - len = ser_number(wb->coinb1bin + ofs, now.tv_nsec); - ofs += len; - - wb->enonce1varlen = ckp->nonce1length; - wb->enonce2varlen = ckp->nonce2length; - wb->coinb1bin[ofs++] = wb->enonce1varlen + wb->enonce2varlen; - - wb->coinb1len = ofs; - - len = wb->coinb1len - 41; - - len += wb->enonce1varlen; - len += wb->enonce2varlen; - - wb->coinb2bin = ckzalloc(512); - memcpy(wb->coinb2bin, "\x0a\x63\x6b\x70\x6f\x6f\x6c", 7); - wb->coinb2len = 7; - if (ckp->btcsig) { - int siglen = strlen(ckp->btcsig); - - LOGDEBUG("Len %d sig %s", siglen, ckp->btcsig); - if (siglen) { - wb->coinb2bin[wb->coinb2len++] = siglen; - memcpy(wb->coinb2bin + wb->coinb2len, ckp->btcsig, siglen); - wb->coinb2len += siglen; - } - } - len += wb->coinb2len; - - wb->coinb1bin[41] = len - 1; /* Set the length now */ - __bin2hex(wb->coinb1, wb->coinb1bin, wb->coinb1len); - LOGDEBUG("Coinb1: %s", wb->coinb1); - /* Coinbase 1 complete */ - - memcpy(wb->coinb2bin + wb->coinb2len, "\xff\xff\xff\xff", 4); - wb->coinb2len += 4; - - // Generation value - g64 = wb->coinbasevalue; - if (ckp->donvalid && ckp->donation > 0) { - double dbl64 = (double)g64 / 100 * ckp->donation; - - d64 = dbl64; - g64 -= d64; // To guarantee integers add up to the original coinbasevalue - wb->coinb2bin[wb->coinb2len++] = 2 + wb->insert_witness; - } else - 
wb->coinb2bin[wb->coinb2len++] = 1 + wb->insert_witness; - - u64 = (uint64_t *)&wb->coinb2bin[wb->coinb2len]; - *u64 = htole64(g64); - wb->coinb2len += 8; - - /* Coinb2 address goes here, takes up 23~25 bytes + 1 byte for length */ - - wb->coinb3len = 0; - wb->coinb3bin = ckzalloc(256 + wb->insert_witness * (8 + witnessdata_size + 2)); - - if (ckp->donvalid && ckp->donation > 0) { - u64 = (uint64_t *)wb->coinb3bin; - *u64 = htole64(d64); - wb->coinb3len += 8; - - wb->coinb3bin[wb->coinb3len++] = sdata->dontxnlen; - memcpy(wb->coinb3bin + wb->coinb3len, sdata->dontxnbin, sdata->dontxnlen); - wb->coinb3len += sdata->dontxnlen; - } else - ckp->donation = 0; - - if (wb->insert_witness) { - // 0 value - wb->coinb3len += 8; - - wb->coinb3bin[wb->coinb3len++] = witnessdata_size + 2; // total scriptPubKey size - wb->coinb3bin[wb->coinb3len++] = 0x6a; // OP_RETURN - wb->coinb3bin[wb->coinb3len++] = witnessdata_size; - - hex2bin(&wb->coinb3bin[wb->coinb3len], wb->witnessdata, witnessdata_size); - wb->coinb3len += witnessdata_size; - } - - wb->coinb3len += 4; // Blank lock - - if (!ckp->btcsolo) { - int coinbase_len, offset = 0; - char *coinbase, *cb; - json_t *val = NULL; - - /* Append the generation address and coinb3 in !solo mode */ - wb->coinb2bin[wb->coinb2len++] = sdata->txnlen; - memcpy(wb->coinb2bin + wb->coinb2len, sdata->txnbin, sdata->txnlen); - wb->coinb2len += sdata->txnlen; - memcpy(wb->coinb2bin + wb->coinb2len, wb->coinb3bin, wb->coinb3len); - wb->coinb2len += wb->coinb3len; - wb->coinb3len = 0; - dealloc(wb->coinb3bin); - - /* Set this only once */ - if (unlikely(!ckp->coinbase_valid)) { - /* We have enough to test the validity of the coinbase here */ - coinbase_len = wb->coinb1len + ckp->nonce1length + ckp->nonce2length + wb->coinb2len; - coinbase = ckzalloc(coinbase_len); - memcpy(coinbase, wb->coinb1bin, wb->coinb1len); - offset += wb->coinb1len; - /* Space for nonce1 and 2 */ - offset += ckp->nonce1length + ckp->nonce2length; - memcpy(coinbase + offset, 
wb->coinb2bin, wb->coinb2len); - offset += wb->coinb2len; - cb = bin2hex(coinbase, offset); - LOGDEBUG("Coinbase txn %s", cb); - free(coinbase); - if (generator_checktxn(ckp, cb, &val)) { - char *s = json_dumps(val, JSON_NO_UTF8 | JSON_COMPACT); - - json_decref(val); - LOGNOTICE("Coinbase transaction confirmed valid"); - LOGDEBUG("%s", s); - free(s); - } else { - /* This is a fatal error */ - LOGEMERG("Coinbase failed valid transaction check, aborting!"); - exit(1); - } - free(cb); - ckp->coinbase_valid = true; - LOGWARNING("Mining from any incoming username to address %s", ckp->btcaddress); - if (ckp->donation) - LOGWARNING("%.1f percent donation to %s", ckp->donation, ckp->donaddress); - } - } else if (unlikely(!ckp->coinbase_valid)) { - /* Create a sample coinbase to test its validity in solo mode */ - int coinbase_len, offset = 0; - char *coinbase, *cb; - json_t *val = NULL; - - coinbase_len = wb->coinb1len + ckp->nonce1length + ckp->nonce2length + wb->coinb2len + - sdata->txnlen + wb->coinb3len + 1; - coinbase = ckzalloc(coinbase_len); - memcpy(coinbase, wb->coinb1bin, wb->coinb1len); - offset += wb->coinb1len; - offset += ckp->nonce1length + ckp->nonce2length; - memcpy(coinbase + offset, wb->coinb2bin, wb->coinb2len); - offset += wb->coinb2len; - coinbase[offset] = sdata->txnlen; - offset += 1; - memcpy(coinbase + offset, sdata->txnbin, sdata->txnlen); - offset += sdata->txnlen; - memcpy(coinbase + offset, wb->coinb3bin, wb->coinb3len); - offset += wb->coinb3len; - cb = bin2hex(coinbase, offset); - LOGDEBUG("Coinbase txn %s", cb); - free(coinbase); - if (generator_checktxn(ckp, cb, &val)) { - char *s = json_dumps(val, JSON_NO_UTF8 | JSON_COMPACT); - - json_decref(val); - LOGNOTICE("Coinbase transaction confirmed valid"); - LOGDEBUG("%s", s); - free(s); - } else { - /* This is a fatal error */ - LOGEMERG("Coinbase failed valid transaction check, aborting!"); - exit(1); - } - free(cb); - ckp->coinbase_valid = true; - LOGWARNING("Mining solo to any incoming 
valid BTC address username"); - if (ckp->donation) - LOGWARNING("%.1f percent donation to %s", ckp->donation, ckp->donaddress); - } - - /* Set this just for node compatibility, though it's unused */ - wb->coinb2 = bin2hex(wb->coinb2bin, wb->coinb2len); - LOGDEBUG("Coinb2: %s", wb->coinb2); - /* Coinbases 2 +/- 3 templates complete */ - - snprintf(header, 270, "%s%s%s%s%s%s%s", - wb->bbversion, wb->prevhash, - "0000000000000000000000000000000000000000000000000000000000000000", - wb->ntime, wb->nbit, - "00000000", /* nonce */ - workpadding); - header[224] = 0; - LOGDEBUG("Header: %s", header); - hex2bin(wb->headerbin, header, 112); -} - -static void stratum_broadcast_update(sdata_t *sdata, const workbase_t *wb, bool clean); -static void stratum_broadcast_updates(sdata_t *sdata, bool clean); - -static void clear_userwb(sdata_t *sdata, int64_t id) -{ - user_instance_t *instance, *tmp; - - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->user_instances, instance, tmp) { - struct userwb *userwb; - - HASH_FIND_I64(instance->userwbs, &id, userwb); - if (!userwb) - continue; - HASH_DEL(instance->userwbs, userwb); - free(userwb->coinb2bin); - free(userwb->coinb2); - free(userwb); - } - ck_wunlock(&sdata->instance_lock); -} - -static void clear_workbase(ckpool_t *ckp, workbase_t *wb) -{ - if (ckp->btcsolo) - clear_userwb(ckp->sdata, wb->id); - free(wb->flags); - free(wb->txn_data); - free(wb->txn_hashes); - free(wb->logdir); - free(wb->coinb1bin); - free(wb->coinb1); - free(wb->coinb2bin); - free(wb->coinb2); - free(wb->coinb3bin); - json_decref(wb->merkle_array); - if (wb->json) - json_decref(wb->json); - free(wb); -} - -/* Remove all shares with a workbase id less than wb_id for block changes */ -static void purge_share_hashtable(sdata_t *sdata, const int64_t wb_id) -{ - share_t *share, *tmp; - int purged = 0; - - mutex_lock(&sdata->share_lock); - HASH_ITER(hh, sdata->shares, share, tmp) { - if (share->workbase_id < wb_id) { - HASH_DEL(sdata->shares, share); - 
dealloc(share); - purged++; - } - } - mutex_unlock(&sdata->share_lock); - - if (purged) - LOGINFO("Cleared %d shares from share hashtable", purged); -} - -/* Remove all shares with a workbase id == wb_id being discarded */ -static void age_share_hashtable(sdata_t *sdata, const int64_t wb_id) -{ - share_t *share, *tmp; - int aged = 0; - - mutex_lock(&sdata->share_lock); - HASH_ITER(hh, sdata->shares, share, tmp) { - if (share->workbase_id == wb_id) { - HASH_DEL(sdata->shares, share); - dealloc(share); - aged++; - } - } - mutex_unlock(&sdata->share_lock); - - if (aged) - LOGINFO("Aged %d shares from share hashtable", aged); -} - -/* Append a bulk list already created to the ssends list */ -static void ssend_bulk_append(sdata_t *sdata, ckmsg_t *bulk_send, const int messages) -{ - ckmsgq_t *ssends = sdata->ssends; - - mutex_lock(ssends->lock); - ssends->messages += messages; - DL_CONCAT(ssends->msgs, bulk_send); - pthread_cond_signal(ssends->cond); - mutex_unlock(ssends->lock); -} - -/* As ssend_bulk_append but for high priority messages to be put at the front - * of the list. 
*/ -static void ssend_bulk_prepend(sdata_t *sdata, ckmsg_t *bulk_send, const int messages) -{ - ckmsgq_t *ssends = sdata->ssends; - ckmsg_t *tmp; - - mutex_lock(ssends->lock); - tmp = ssends->msgs; - ssends->msgs = bulk_send; - ssends->messages += messages; - DL_CONCAT(ssends->msgs, tmp); - pthread_cond_signal(ssends->cond); - mutex_unlock(ssends->lock); -} - -/* Send a json msg to an upstream trusted remote server */ -static void upstream_json(ckpool_t *ckp, json_t *val) -{ - char *msg; - - msg = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); - /* Connector absorbs and frees msg */ - connector_upstream_msg(ckp, msg); -} - -/* Upstream a json msgtype */ -static void upstream_json_msgtype(ckpool_t *ckp, json_t *val, const int msg_type) -{ - json_set_string(val, "method", stratum_msgs[msg_type]); - upstream_json(ckp, val); -} - -/* Upstream a json msgtype, duplicating the json */ -static void upstream_msgtype(ckpool_t *ckp, const json_t *val, const int msg_type) -{ - json_t *json_msg = json_deep_copy(val); - - json_set_string(json_msg, "method", stratum_msgs[msg_type]); - upstream_json(ckp, json_msg); - json_decref(json_msg); -} - -static void send_node_workinfo(ckpool_t *ckp, sdata_t *sdata, const workbase_t *wb) -{ - stratum_instance_t *client; - ckmsg_t *bulk_send = NULL; - int messages = 0; - json_t *wb_val; - - wb_val = json_object(); - - json_set_int(wb_val, "jobid", wb->mapped_id); - json_set_string(wb_val, "target", wb->target); - json_set_double(wb_val, "diff", wb->diff); - json_set_int(wb_val, "version", wb->version); - json_set_int(wb_val, "curtime", wb->curtime); - json_set_string(wb_val, "prevhash", wb->prevhash); - json_set_string(wb_val, "ntime", wb->ntime); - json_set_string(wb_val, "bbversion", wb->bbversion); - json_set_string(wb_val, "nbit", wb->nbit); - json_set_int(wb_val, "coinbasevalue", wb->coinbasevalue); - json_set_int(wb_val, "height", wb->height); - json_set_string(wb_val, "flags", wb->flags); - 
json_set_int(wb_val, "txns", wb->txns); - json_set_string(wb_val, "txn_hashes", wb->txn_hashes); - json_set_int(wb_val, "merkles", wb->merkles); - json_object_set_new_nocheck(wb_val, "merklehash", json_deep_copy(wb->merkle_array)); - json_set_string(wb_val, "coinb1", wb->coinb1); - json_set_int(wb_val, "enonce1varlen", wb->enonce1varlen); - json_set_int(wb_val, "enonce2varlen", wb->enonce2varlen); - json_set_int(wb_val, "coinb1len", wb->coinb1len); - json_set_int(wb_val, "coinb2len", wb->coinb2len); - json_set_string(wb_val, "coinb2", wb->coinb2); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(sdata->node_instances, client, node_next) { - ckmsg_t *client_msg; - smsg_t *msg; - json_t *json_msg = json_deep_copy(wb_val); - - json_set_string(json_msg, "node.method", stratum_msgs[SM_WORKINFO]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - DL_FOREACH2(sdata->remote_instances, client, remote_next) { - ckmsg_t *client_msg; - smsg_t *msg; - json_t *json_msg = json_deep_copy(wb_val); - - json_set_string(json_msg, "method", stratum_msgs[SM_WORKINFO]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&sdata->instance_lock); - - if (ckp->remote) - upstream_msgtype(ckp, wb_val, SM_WORKINFO); - - json_decref(wb_val); - - if (bulk_send) { - LOGINFO("Sending workinfo to mining nodes"); - ssend_bulk_append(sdata, bulk_send, messages); - } -} - -static json_t *generate_workinfo(ckpool_t *ckp, const workbase_t *wb, const char *func) -{ - char cdfield[64]; - json_t *val; - - sprintf(cdfield, "%lu,%lu", wb->gentime.tv_sec, wb->gentime.tv_nsec); - - JSON_CPACK(val, "{sI,ss,ss,ss,ss,ss,ss,ss,ss,sI,so,ss,ss,ss,ss}", - "workinfoid", wb->id, - 
"poolinstance", ckp->name, - "transactiontree", wb->txn_hashes, - "prevhash", wb->prevhash, - "coinbase1", wb->coinb1, - "coinbase2", wb->coinb2, - "version", wb->bbversion, - "ntime", wb->ntime, - "bits", wb->nbit, - "reward", wb->coinbasevalue, - "merklehash", json_deep_copy(wb->merkle_array), - "createdate", cdfield, - "createby", "code", - "createcode", func, - "createinet", ckp->serverurl[0]); - return val; -} - -static void send_workinfo(ckpool_t *ckp, sdata_t *sdata, const workbase_t *wb) -{ - if (!ckp->proxy) - send_node_workinfo(ckp, sdata, wb); -} - -/* Entered with instance_lock held, make sure wb can't be pulled from us */ -static void __generate_userwb(sdata_t *sdata, workbase_t *wb, user_instance_t *user) -{ - struct userwb *userwb; - int64_t id = wb->id; - - /* Make sure this user doesn't have this userwb already */ - HASH_FIND_I64(user->userwbs, &id, userwb); - if (unlikely(userwb)) - return; - - sdata->userwbs_generated++; - userwb = ckzalloc(sizeof(struct userwb)); - userwb->id = id; - userwb->coinb2bin = ckalloc(wb->coinb2len + 1 + user->txnlen + wb->coinb3len); - memcpy(userwb->coinb2bin, wb->coinb2bin, wb->coinb2len); - userwb->coinb2len = wb->coinb2len; - userwb->coinb2bin[userwb->coinb2len++] = user->txnlen; - memcpy(userwb->coinb2bin + userwb->coinb2len, user->txnbin, user->txnlen); - userwb->coinb2len += user->txnlen; - memcpy(userwb->coinb2bin + userwb->coinb2len, wb->coinb3bin, wb->coinb3len); - userwb->coinb2len += wb->coinb3len; - userwb->coinb2 = bin2hex(userwb->coinb2bin, userwb->coinb2len); - HASH_ADD_I64(user->userwbs, id, userwb); -} - -static void generate_userwbs(sdata_t *sdata, workbase_t *wb) -{ - user_instance_t *instance, *tmp; - - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->user_instances, instance, tmp) { - if (!instance->btcaddress) - continue; - __generate_userwb(sdata, wb, instance); - } - ck_wunlock(&sdata->instance_lock); -} - -/* Add a new workbase to the table of workbases. 
Sdata is the global data in - * pool mode but unique to each subproxy in proxy mode */ -static void add_base(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb, bool *new_block) -{ - sdata_t *ckp_sdata = ckp->sdata; - pool_stats_t *stats = &sdata->stats; - double old_diff = stats->network_diff; - workbase_t *tmp, *tmpa; - int len, ret; - - ts_realtime(&wb->gentime); - /* Stats network_diff is not protected by lock but is not a critical - * value */ - wb->network_diff = diff_from_nbits(wb->headerbin + 72); - if (wb->network_diff < 1) - wb->network_diff = 1; - stats->network_diff = wb->network_diff; - if (stats->network_diff != old_diff) - LOGWARNING("Network diff set to %.1f", stats->network_diff); - len = strlen(ckp->logdir) + 8 + 1 + 16 + 1; - wb->logdir = ckzalloc(len); - - /* In proxy mode, the wb->id is received in the notify update and - * we set workbase_id from it. In server mode the stratifier is - * setting the workbase_id */ - ck_wlock(&sdata->workbase_lock); - ckp_sdata->workbases_generated++; - if (!ckp->proxy) - wb->mapped_id = wb->id = sdata->workbase_id++; - else - sdata->workbase_id = wb->id; - if (strncmp(wb->prevhash, sdata->lasthash, 64)) { - char bin[32], swap[32]; - - *new_block = true; - memcpy(sdata->lasthash, wb->prevhash, 65); - hex2bin(bin, sdata->lasthash, 32); - swap_256(swap, bin); - __bin2hex(sdata->lastswaphash, swap, 32); - sdata->blockchange_id = wb->id; - } - if (*new_block && ckp->logshares) { - sprintf(wb->logdir, "%s%08x/", ckp->logdir, wb->height); - ret = mkdir(wb->logdir, 0750); - if (unlikely(ret && errno != EEXIST)) - LOGERR("Failed to create log directory %s", wb->logdir); - } - sprintf(wb->idstring, "%016lx", wb->id); - if (ckp->logshares) - sprintf(wb->logdir, "%s%08x/%s", ckp->logdir, wb->height, wb->idstring); - - HASH_ADD_I64(sdata->workbases, id, wb); - if (sdata->current_workbase) - tv_time(&sdata->current_workbase->retired); - sdata->current_workbase = wb; - - /* Is this long enough to ensure we don't dereference a 
workbase - * immediately? Should be unless clock changes 10 minutes so we use - * ts_realtime */ - HASH_ITER(hh, sdata->workbases, tmp, tmpa) { - if (HASH_COUNT(sdata->workbases) < 3) - break; - if (wb == tmp) - continue; - if (tmp->readcount) - continue; - /* Age old workbases older than 10 minutes old */ - if (tmp->gentime.tv_sec < wb->gentime.tv_sec - 600) { - HASH_DEL(sdata->workbases, tmp); - ck_wunlock(&sdata->workbase_lock); - - /* Drop lock to avoid recursive locks */ - age_share_hashtable(sdata, tmp->id); - clear_workbase(ckp, tmp); - - ck_wlock(&sdata->workbase_lock); - } - } - ck_wunlock(&sdata->workbase_lock); - - /* This wb can't be pulled out from under us so no workbase lock is - * required to generate_userwbs */ - if (ckp->btcsolo) - generate_userwbs(sdata, wb); - - if (*new_block) - purge_share_hashtable(sdata, wb->id); - - if (!ckp->passthrough) - send_workinfo(ckp, sdata, wb); -} - -static void broadcast_ping(sdata_t *sdata); - -#define REFCOUNT_REMOTE 20 -#define REFCOUNT_LOCAL 10 -#define REFCOUNT_RETURNED 5 - -/* Submit the transactions in node/remote mode so the local btcd has all the - * transactions that will go into the next blocksolve. */ -static void submit_transaction(ckpool_t *ckp, const char *hash) -{ - char *buf; - - if (unlikely(!ckp->generator_ready)) - return; - ASPRINTF(&buf, "submittxn:%s", hash); - send_proc(ckp->generator,buf); - free(buf); -} - -/* Build a hashlist of all transactions, allowing us to compare with the list of - * existing transactions to determine which need to be propagated */ -static bool add_txn(ckpool_t *ckp, sdata_t *sdata, txntable_t **txns, const char *hash, - const char *data, bool local) -{ - bool found = false; - txntable_t *txn; - - /* Look for transactions we already know about and increment their - * refcount if we're still using them. 
*/ - ck_wlock(&sdata->txn_lock); - HASH_FIND_STR(sdata->txns, hash, txn); - if (txn) { - /* If we already have this in our transaction table but haven't - * seen it in a while, it is reappearing in work and we should - * propagate it again in update_txns. */ - if (txn->refcount > REFCOUNT_RETURNED) - found = true; - if (!local) - txn->refcount = REFCOUNT_REMOTE; - else if (txn->refcount < REFCOUNT_LOCAL) - txn->refcount = REFCOUNT_LOCAL; - txn->seen = true; - } - ck_wunlock(&sdata->txn_lock); - - if (found) - return false; - - txn = ckzalloc(sizeof(txntable_t)); - memcpy(txn->hash, hash, 65); - if (local) - txn->data = strdup(data); - else { - /* Get the data from our local bitcoind as a way of confirming it - * already knows about this transaction. */ - txn->data = generator_get_txn(ckp, hash); - if (!txn->data) { - /* If our local bitcoind hasn't seen this transaction, - * submit it for mempools to be ~synchronised */ - submit_transaction(ckp, data); - txn->data = strdup(data); - } - } - - txn->seen = true; - if (!local || ckp->node) - txn->refcount = REFCOUNT_REMOTE; - else - txn->refcount = REFCOUNT_LOCAL; - HASH_ADD_STR(*txns, hash, txn); - - return true; -} - -static void send_node_transactions(ckpool_t *ckp, sdata_t *sdata, const json_t *txn_val) -{ - stratum_instance_t *client; - ckmsg_t *bulk_send = NULL; - ckmsg_t *client_msg; - int messages = 0; - json_t *json_msg; - smsg_t *msg; - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(sdata->node_instances, client, node_next) { - json_msg = json_deep_copy(txn_val); - json_set_string(json_msg, "node.method", stratum_msgs[SM_TRANSACTIONS]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - DL_FOREACH2(sdata->remote_instances, client, remote_next) { - json_msg = json_deep_copy(txn_val); - json_set_string(json_msg, "method", 
stratum_msgs[SM_TRANSACTIONS]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&sdata->instance_lock); - - if (ckp->remote) - upstream_msgtype(ckp, txn_val, SM_TRANSACTIONS); - - if (bulk_send) { - LOGINFO("Sending transactions to mining nodes"); - ssend_bulk_append(sdata, bulk_send, messages); - } -} - -static void submit_transaction_array(ckpool_t *ckp, const json_t *arr) -{ - json_t *arr_val; - size_t index; - - json_array_foreach(arr, index, arr_val) { - submit_transaction(ckp, json_string_value(arr_val)); - } -} - -static void clear_txn(txntable_t *txn) -{ - free(txn->data); - free(txn); -} - -static void update_txns(ckpool_t *ckp, sdata_t *sdata, txntable_t *txns, bool local) -{ - json_t *val, *txn_array = json_array(), *purged_txns = json_array(); - int added = 0, purged = 0; - txntable_t *tmp, *tmpa; - - /* Find which transactions have their refcount decremented to zero - * and remove them. */ - ck_wlock(&sdata->txn_lock); - HASH_ITER(hh, sdata->txns, tmp, tmpa) { - json_t *txn_val; - - if (tmp->seen) { - tmp->seen = false; - continue; - } - if (tmp->refcount-- > 0) - continue; - HASH_DEL(sdata->txns, tmp); - txn_val = json_string(tmp->data); - json_array_append_new(purged_txns, txn_val); - clear_txn(tmp); - purged++; - } - /* Add the new transactions to the transaction table */ - HASH_ITER(hh, txns, tmp, tmpa) { - txntable_t *found; - json_t *txn_val; - - HASH_DEL(txns, tmp); - /* Propagate transaction here */ - JSON_CPACK(txn_val, "{ss,ss}", "hash", tmp->hash, "data", tmp->data); - json_array_append_new(txn_array, txn_val); - - /* Check one last time this txn hasn't already been added in the - * interim. This can happen in add_txn intentionally for a - * transaction that has reappeared. 
*/ - HASH_FIND_STR(sdata->txns, tmp->hash, found); - if (found) { - clear_txn(tmp); - continue; - } - - /* Move to the sdata transaction table */ - HASH_ADD_STR(sdata->txns, hash, tmp); - sdata->txns_generated++; - added++; - } - ck_wunlock(&sdata->txn_lock); - - if (added) { - JSON_CPACK(val, "{so}", "transaction", txn_array); - send_node_transactions(ckp, sdata, val); - json_decref(val); - } else - json_decref(txn_array); - - /* Submit transactions to bitcoind again when we're purging them in - * case they've been removed from its mempool as well and we need them - * again in the future for a remote workinfo that hasn't forgotten - * about them. */ - if (purged && ckp->nodeservers) - submit_transaction_array(ckp, purged_txns); - json_decref(purged_txns); - - if (added || purged) { - LOGINFO("Stratifier added %d %stransactions and purged %d", added, - local ? "" : "remote ", purged); - } -} - -/* Distill down a set of transactions into an efficient tree arrangement for - * stratum messages and fast work assembly. 
*/ -static txntable_t *wb_merkle_bin_txns(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb, - json_t *txn_array, bool local) -{ - int i, j, binleft, binlen; - txntable_t *txns = NULL; - json_t *arr_val; - uchar *hashbin; - - wb->txns = json_array_size(txn_array); - wb->merkles = 0; - binlen = wb->txns * 32 + 32; - hashbin = alloca(binlen + 32); - memset(hashbin, 0, 32); - binleft = binlen / 32; - if (wb->txns) { - int len = 1, ofs = 0; - const char *txn; - - for (i = 0; i < wb->txns; i++) { - arr_val = json_array_get(txn_array, i); - txn = json_string_value(json_object_get(arr_val, "data")); - if (!txn) { - LOGWARNING("json_string_value fail - cannot find transaction data"); - goto out; - } - len += strlen(txn); - } - - wb->txn_data = ckzalloc(len + 1); - wb->txn_hashes = ckzalloc(wb->txns * 65 + 1); - memset(wb->txn_hashes, 0x20, wb->txns * 65); // Spaces - - for (i = 0; i < wb->txns; i++) { - const char *txid, *hash; - char binswap[32]; - - arr_val = json_array_get(txn_array, i); - - // Post-segwit, txid returns the tx hash without witness data - txid = json_string_value(json_object_get(arr_val, "txid")); - hash = json_string_value(json_object_get(arr_val, "hash")); - if (!txid) - txid = hash; - if (unlikely(!txid)) { - LOGERR("Missing txid for transaction in wb_merkle_bins"); - goto out; - } - txn = json_string_value(json_object_get(arr_val, "data")); - add_txn(ckp, sdata, &txns, hash, txn, local); - len = strlen(txn); - memcpy(wb->txn_data + ofs, txn, len); - ofs += len; - if (!hex2bin(binswap, txid, 32)) { - LOGERR("Failed to hex2bin hash in gbt_merkle_bins"); - goto out; - } - memcpy(wb->txn_hashes + i * 65, txid, 64); - bswap_256(hashbin + 32 + 32 * i, binswap); - } - } else - wb->txn_hashes = ckzalloc(1); - wb->merkle_array = json_array(); - if (binleft > 1) { - while (42) { - if (binleft == 1) - break; - memcpy(&wb->merklebin[wb->merkles][0], hashbin + 32, 32); - __bin2hex(&wb->merklehash[wb->merkles][0], &wb->merklebin[wb->merkles][0], 32); - 
json_array_append_new(wb->merkle_array, json_string(&wb->merklehash[wb->merkles][0])); - LOGDEBUG("MerkleHash %d %s",wb->merkles, &wb->merklehash[wb->merkles][0]); - wb->merkles++; - if (binleft % 2) { - memcpy(hashbin + binlen, hashbin + binlen - 32, 32); - binlen += 32; - binleft++; - } - for (i = 32, j = 64; j < binlen; i += 32, j += 64) - gen_hash(hashbin + j, hashbin + i, 64); - binleft /= 2; - binlen = binleft * 32; - } - } - LOGNOTICE("Stored %s workbase with %d transactions", local ? "local" : "remote", - wb->txns); -out: - return txns; -} - -static const unsigned char witness_nonce[32] = {0}; -static const int witness_nonce_size = sizeof(witness_nonce); -static const unsigned char witness_header[] = {0xaa, 0x21, 0xa9, 0xed}; -static const int witness_header_size = sizeof(witness_header); - -static void gbt_witness_data(workbase_t *wb, json_t *txn_array) -{ - int i, binlen, txncount = json_array_size(txn_array); - const char* hash; - json_t *arr_val; - uchar *hashbin; - - binlen = txncount * 32 + 32; - hashbin = alloca(binlen + 32); - memset(hashbin, 0, 32); - - for (i = 0; i < txncount; i++) { - char binswap[32]; - - arr_val = json_array_get(txn_array, i); - hash = json_string_value(json_object_get(arr_val, "hash")); - if (unlikely(!hash)) { - LOGERR("Hash missing for transaction"); - return; - } - if (!hex2bin(binswap, hash, 32)) { - LOGERR("Failed to hex2bin hash in gbt_witness_data"); - return; - } - bswap_256(hashbin + 32 + 32 * i, binswap); - } - - // Build merkle root (copied from libblkmaker) - for (txncount++ ; txncount > 1 ; txncount /= 2) { - if (txncount % 2) { - // Odd number, duplicate the last - memcpy(hashbin + 32 * txncount, hashbin + 32 * (txncount - 1), 32); - txncount++; - } - for (i = 0; i < txncount; i += 2) { - // We overlap input and output here, on the first pair - gen_hash(hashbin + 32 * i, hashbin + 32 * (i / 2), 64); - } - } - - memcpy(hashbin + 32, &witness_nonce, witness_nonce_size); - gen_hash(hashbin, hashbin + 
witness_header_size, 32 + witness_nonce_size); - memcpy(hashbin, witness_header, witness_header_size); - __bin2hex(wb->witnessdata, hashbin, 32 + witness_header_size); - wb->insert_witness = true; -} - -/* This function assumes it will only receive a valid json gbt base template - * since checking should have been done earlier, and creates the base template - * for generating work templates. This is a ckmsgq so all uses of this function - * are serialised. */ -static void block_update(ckpool_t *ckp, int *prio) -{ - bool new_block = false, ret = false; - const char *witnessdata_check; - sdata_t *sdata = ckp->sdata; - json_t *txn_array; - txntable_t *txns; - int retries = 0; - workbase_t *wb; - -retry: - wb = generator_getbase(ckp); - if (unlikely(!wb)) { - if (retries++ < 5 || *prio == GEN_PRIORITY) { - LOGWARNING("Generator returned failure in update_base, retry #%d", retries); - goto retry; - } - LOGWARNING("Generator failed in update_base after retrying"); - goto out; - } - if (unlikely(retries)) - LOGWARNING("Generator succeeded in update_base after retrying"); - - wb->ckp = ckp; - - txn_array = json_object_get(wb->json, "transactions"); - txns = wb_merkle_bin_txns(ckp, sdata, wb, txn_array, true); - - wb->insert_witness = false; - - witnessdata_check = json_string_value(json_object_get(wb->json, "default_witness_commitment")); - if (likely(witnessdata_check)) { - LOGDEBUG("Default witness commitment present, adding witness data"); - gbt_witness_data(wb, txn_array); - // Verify against the pre-calculated value if it exists. Skip the size/OP_RETURN bytes. - if (wb->insert_witness && safecmp(witnessdata_check + 4, wb->witnessdata) != 0) - LOGERR("Witness from btcd: %s. 
Calculated Witness: %s", witnessdata_check + 4, wb->witnessdata); - } - - generate_coinbase(ckp, wb); - - add_base(ckp, sdata, wb, &new_block); - - if (new_block) - LOGNOTICE("Block hash changed to %s", sdata->lastswaphash); - if (ckp->btcsolo) - stratum_broadcast_updates(sdata, new_block); - else - stratum_broadcast_update(sdata, wb, new_block); - ret = true; - LOGINFO("Broadcast updated stratum base"); - /* Update transactions after stratum broadcast to not delay - * propagation. */ - if (likely(txns)) - update_txns(ckp, sdata, txns, true); - /* Reset the update time to avoid stacked low priority notifies. Bring - * forward the next notify in case of a new block. */ - sdata->update_time = time(NULL); - if (new_block) - sdata->update_time -= ckp->update_interval / 2; -out: - - cksem_post(&sdata->update_sem); - - /* Send a ping to miners if we fail to get a base to keep them - * connected while bitcoind recovers(?) */ - if (unlikely(!ret)) { - LOGINFO("Broadcast ping due to failed stratum base update"); - broadcast_ping(sdata); - } - free(prio); -} - -#define SSEND_PREPEND 0 -#define SSEND_APPEND 1 - -/* Downstream a json message to all remote servers except for the one matching - * client_id */ -static void downstream_json(sdata_t *sdata, const json_t *val, const int64_t client_id, - const int prio) -{ - stratum_instance_t *client; - ckmsg_t *bulk_send = NULL; - int messages = 0; - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(sdata->remote_instances, client, remote_next) { - ckmsg_t *client_msg; - json_t *json_msg; - smsg_t *msg; - - /* Don't send remote workinfo back to same remote */ - if (client->id == client_id) - continue; - json_msg = json_deep_copy(val); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&sdata->instance_lock); - - if (bulk_send) { - LOGINFO("Sending json to %d 
remote servers", messages); - switch (prio) { - case SSEND_PREPEND: - ssend_bulk_prepend(sdata, bulk_send, messages); - break; - case SSEND_APPEND: - ssend_bulk_append(sdata, bulk_send, messages); - break; - } - } -} - -/* Find any transactions that are missing from our transaction table during - * rebuild_txns by requesting their data from another server. */ -static void request_txns(ckpool_t *ckp, sdata_t *sdata, json_t *txns) -{ - json_t *val; - - JSON_CPACK(val, "{so}", "hash", txns); - if (ckp->remote) - upstream_msgtype(ckp, val, SM_REQTXNS); - else if (ckp->node) { - /* Nodes have no way to signal upstream pool yet */ - } else { - /* We don't know which remote sent the transaction hash so ask - * all of them for it */ - json_set_string(val, "method", stratum_msgs[SM_REQTXNS]); - downstream_json(sdata, val, 0, SSEND_APPEND); - } -} - -/* Rebuilds transactions from txnhashes to be able to construct wb_merkle_bins - * on remote workbases */ -static bool rebuild_txns(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb) -{ - const char *hashes = wb->txn_hashes; - json_t *txn_array, *missing_txns; - char hash[68] = {}; - bool ret = false; - txntable_t *txns; - int i, len = 0; - - /* We'll only see this on testnet now */ - if (unlikely(!wb->txns)) { - ret = true; - goto out; - } - if (likely(hashes)) - len = strlen(hashes); - if (!hashes || !len) - goto out; - - if (unlikely(len < wb->txns * 65)) { - LOGERR("Truncated transactions in rebuild_txns only %d long", len); - goto out; - } - ret = true; - txn_array = json_array(); - missing_txns = json_array(); - - for (i = 0; i < wb->txns; i++) { - json_t *txn_val = NULL; - txntable_t *txn; - char *data; - - memcpy(hash, hashes + i * 65, 64); - - ck_wlock(&sdata->txn_lock); - HASH_FIND_STR(sdata->txns, hash, txn); - if (likely(txn)) { - txn->refcount = REFCOUNT_REMOTE; - txn->seen = true; - JSON_CPACK(txn_val, "{ss,ss}", - "hash", hash, "data", txn->data); - json_array_append_new(txn_array, txn_val); - } - 
ck_wunlock(&sdata->txn_lock); - - if (likely(txn_val)) - continue; - /* See if we can find it in our local bitcoind */ - data = generator_get_txn(ckp, hash); - if (!data) { - txn_val = json_string(hash); - json_array_append_new(missing_txns, txn_val); - ret = false; - continue; - } - - /* We've found it, let's add it to the table */ - ck_wlock(&sdata->txn_lock); - /* One last check in case it got added while we dropped the lock */ - HASH_FIND_STR(sdata->txns, hash, txn); - if (likely(!txn)) { - txn = ckzalloc(sizeof(txntable_t)); - memcpy(txn->hash, hash, 65); - txn->data = data; - HASH_ADD_STR(sdata->txns, hash, txn); - sdata->txns_generated++; - } else { - free(data); - } - txn->refcount = REFCOUNT_REMOTE; - txn->seen = true; - JSON_CPACK(txn_val, "{ss,ss}", - "hash", hash, "data", txn->data); - json_array_append_new(txn_array, txn_val); - ck_wunlock(&sdata->txn_lock); - } - - if (ret) { - wb->incomplete = false; - LOGINFO("Rebuilt txns into workbase with %d transactions", i); - /* These two structures are regenerated so free their ram */ - json_decref(wb->merkle_array); - dealloc(wb->txn_hashes); - txns = wb_merkle_bin_txns(ckp, sdata, wb, txn_array, false); - if (likely(txns)) - update_txns(ckp, sdata, txns, false); - } else { - if (!sdata->wbincomplete) { - sdata->wbincomplete = true; - if (ckp->proxy) - LOGWARNING("Unable to rebuild transactions to create workinfo, ignore displayed hashrate"); - } - LOGINFO("Failed to find all txns in rebuild_txns"); - request_txns(ckp, sdata, missing_txns); - } - - json_decref(txn_array); - json_decref(missing_txns); -out: - return ret; -} - -/* Remote workbases are keyed by the combined values of wb->id and - * wb->client_id to prevent collisions in the unlikely event two remote - * servers are generating the same workbase ids. 
*/ -static void __add_to_remote_workbases(sdata_t *sdata, workbase_t *wb) -{ - HASH_ADD(hh, sdata->remote_workbases, id, sizeof(int64_t) * 2, wb); -} - -static void add_remote_base(ckpool_t *ckp, sdata_t *sdata, workbase_t *wb) -{ - stratum_instance_t *client; - ckmsg_t *bulk_send = NULL; - workbase_t *tmp, *tmpa; - int messages = 0; - int64_t skip; - json_t *val; - - ts_realtime(&wb->gentime); - - ck_wlock(&sdata->workbase_lock); - sdata->workbases_generated++; - wb->mapped_id = sdata->workbase_id++; - HASH_ITER(hh, sdata->remote_workbases, tmp, tmpa) { - if (HASH_COUNT(sdata->remote_workbases) < 3) - break; - if (wb == tmp) - continue; - if (tmp->readcount) - continue; - /* Age old workbases older than 10 minutes old */ - if (tmp->gentime.tv_sec < wb->gentime.tv_sec - 600) { - HASH_DEL(sdata->remote_workbases, tmp); - ck_wunlock(&sdata->workbase_lock); - - clear_workbase(ckp, tmp); - - ck_wlock(&sdata->workbase_lock); - } - } - __add_to_remote_workbases(sdata, wb); - ck_wunlock(&sdata->workbase_lock); - - val = generate_workinfo(ckp, wb, __func__); - - /* Set jobid with mapped id for other nodes and remotes */ - json_set_int64(val, "jobid", wb->mapped_id); - - /* Replace workinfoid to mapped id */ - json_set_int64(val, "workinfoid", wb->mapped_id); - - /* Strip unnecessary fields and add extra fields needed */ - json_set_int(val, "txns", wb->txns); - json_set_string(val, "txn_hashes", wb->txn_hashes); - json_set_int(val, "merkles", wb->merkles); - - skip = subclient(wb->client_id); - - /* Send a copy of this to all OTHER remote trusted servers as well */ - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(sdata->remote_instances, client, remote_next) { - ckmsg_t *client_msg; - json_t *json_msg; - smsg_t *msg; - - /* Don't send remote workinfo back to the source remote */ - if (client->id == wb->client_id) - continue; - json_msg = json_deep_copy(val); - json_set_string(json_msg, "method", stratum_msgs[SM_WORKINFO]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = 
ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - DL_FOREACH2(sdata->node_instances, client, node_next) { - ckmsg_t *client_msg; - json_t *json_msg; - smsg_t *msg; - - /* Don't send node workinfo back to the source node */ - if (client->id == skip) - continue; - json_msg = json_deep_copy(val); - json_set_string(json_msg, "node.method", stratum_msgs[SM_WORKINFO]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&sdata->instance_lock); - - json_decref(val); - - if (bulk_send) { - LOGINFO("Sending remote workinfo to %d other remote servers", messages); - ssend_bulk_append(sdata, bulk_send, messages); - } -} - -static void add_node_base(ckpool_t *ckp, json_t *val, bool trusted, int64_t client_id) -{ - workbase_t *wb = ckzalloc(sizeof(workbase_t)); - sdata_t *sdata = ckp->sdata; - bool new_block = false; - char header[272]; - - wb->ckp = ckp; - /* This is the client id if this workbase came from a remote trusted - * server. 
*/ - wb->client_id = client_id; - - /* Some of these fields are empty when running as a remote trusted - * server receiving other workinfos from the upstream pool */ - json_int64cpy(&wb->id, val, "jobid"); - json_strcpy(wb->target, val, "target"); - json_dblcpy(&wb->diff, val, "diff"); - json_uintcpy(&wb->version, val, "version"); - json_uintcpy(&wb->curtime, val, "curtime"); - json_strcpy(wb->prevhash, val, "prevhash"); - json_strcpy(wb->ntime, val, "ntime"); - sscanf(wb->ntime, "%x", &wb->ntime32); - json_strcpy(wb->bbversion, val, "bbversion"); - json_strcpy(wb->nbit, val, "nbit"); - json_uint64cpy(&wb->coinbasevalue, val, "coinbasevalue"); - json_intcpy(&wb->height, val, "height"); - json_strdup(&wb->flags, val, "flags"); - - json_intcpy(&wb->txns, val, "txns"); - json_strdup(&wb->txn_hashes, val, "txn_hashes"); - if (!ckp->proxy) { - /* This is a workbase from a trusted remote */ - wb->merkle_array = json_object_dup(val, "merklehash"); - json_intcpy(&wb->merkles, val, "merkles"); - if (!rebuild_txns(ckp, sdata, wb)) - wb->incomplete = true; - } else { - if (!rebuild_txns(ckp, sdata, wb)) { - clear_workbase(ckp, wb); - return; - } - } - json_strdup(&wb->coinb1, val, "coinb1"); - json_intcpy(&wb->coinb1len, val, "coinb1len"); - wb->coinb1bin = ckzalloc(wb->coinb1len); - hex2bin(wb->coinb1bin, wb->coinb1, wb->coinb1len); - json_strdup(&wb->coinb2, val, "coinb2"); - json_intcpy(&wb->coinb2len, val, "coinb2len"); - wb->coinb2bin = ckzalloc(wb->coinb2len); - hex2bin(wb->coinb2bin, wb->coinb2, wb->coinb2len); - json_intcpy(&wb->enonce1varlen, val, "enonce1varlen"); - json_intcpy(&wb->enonce2varlen, val, "enonce2varlen"); - ts_realtime(&wb->gentime); - - snprintf(header, 270, "%s%s%s%s%s%s%s", - wb->bbversion, wb->prevhash, - "0000000000000000000000000000000000000000000000000000000000000000", - wb->ntime, wb->nbit, - "00000000", /* nonce */ - workpadding); - header[224] = 0; - LOGDEBUG("Header: %s", header); - hex2bin(wb->headerbin, header, 112); - - /* If this is 
from a remote trusted server or an upstream server, add - * it to the remote_workbases hashtable */ - if (trusted) - add_remote_base(ckp, sdata, wb); - else - add_base(ckp, sdata, wb, &new_block); - - if (new_block) - LOGNOTICE("Block hash changed to %s", sdata->lastswaphash); -} - -/* Calculate share diff and fill in hash and swap. Need to hold workbase read count */ -static double -share_diff(char *coinbase, const uchar *enonce1bin, const workbase_t *wb, const char *nonce2, - const uint32_t ntime32, uint32_t version_mask, const char *nonce, - uchar *hash, uchar *swap, int *cblen) -{ - unsigned char merkle_root[32], merkle_sha[64]; - uint32_t *data32, *swap32, benonce32; - uchar hash1[32]; - char data[80]; - int i; - - memcpy(coinbase, wb->coinb1bin, wb->coinb1len); - *cblen = wb->coinb1len; - memcpy(coinbase + *cblen, enonce1bin, wb->enonce1constlen + wb->enonce1varlen); - *cblen += wb->enonce1constlen + wb->enonce1varlen; - hex2bin(coinbase + *cblen, nonce2, wb->enonce2varlen); - *cblen += wb->enonce2varlen; - memcpy(coinbase + *cblen, wb->coinb2bin, wb->coinb2len); - *cblen += wb->coinb2len; - - gen_hash((uchar *)coinbase, merkle_root, *cblen); - memcpy(merkle_sha, merkle_root, 32); - for (i = 0; i < wb->merkles; i++) { - memcpy(merkle_sha + 32, &wb->merklebin[i], 32); - gen_hash(merkle_sha, merkle_root, 64); - memcpy(merkle_sha, merkle_root, 32); - } - data32 = (uint32_t *)merkle_sha; - swap32 = (uint32_t *)merkle_root; - flip_32(swap32, data32); - - /* Copy the cached header binary and insert the merkle root */ - memcpy(data, wb->headerbin, 80); - memcpy(data + 36, merkle_root, 32); - - /* Update nVersion when version_mask is in use */ - if (version_mask) { - version_mask = htobe32(version_mask); - data32 = (uint32_t *)data; - *data32 |= version_mask; - } - - /* Insert the nonce value into the data */ - hex2bin(&benonce32, nonce, 4); - data32 = (uint32_t *)(data + 64 + 12); - *data32 = benonce32; - - /* Insert the ntime value into the data */ - data32 = 
(uint32_t *)(data + 68); - *data32 = htobe32(ntime32); - - /* Hash the share */ - data32 = (uint32_t *)data; - swap32 = (uint32_t *)swap; - flip_80(swap32, data32); - sha256(swap, 80, hash1); - sha256(hash1, 32, hash); - - /* Calculate the diff of the share here */ - return diff_from_target(hash); -} - -static void add_remote_blockdata(ckpool_t *ckp, json_t *val, const int cblen, const char *coinbase, - const uchar *data) -{ - char *buf; - - json_set_string(val, "name", ckp->name); - json_set_int(val, "cblen", cblen); - buf = bin2hex(coinbase, cblen); - json_set_string(val, "coinbasehex", buf); - free(buf); - buf = bin2hex(data, 80); - json_set_string(val, "swaphex", buf); - free(buf); -} - -/* Entered with workbase readcount, grabs instance_lock. client_id is where the - * block originated. */ -static void send_nodes_block(sdata_t *sdata, const json_t *block_val, const int64_t client_id) -{ - stratum_instance_t *client; - ckmsg_t *bulk_send = NULL; - int messages = 0; - int64_t skip; - - /* Don't send the block back to a remote node if that's where it was - * found. */ - skip = subclient(client_id); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(sdata->node_instances, client, node_next) { - ckmsg_t *client_msg; - json_t *json_msg; - smsg_t *msg; - - if (client->id == skip) - continue; - json_msg = json_deep_copy(block_val); - json_set_string(json_msg, "node.method", stratum_msgs[SM_BLOCK]); - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = json_msg; - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&sdata->instance_lock); - - if (bulk_send) { - LOGNOTICE("Sending block to %d mining nodes", messages); - ssend_bulk_prepend(sdata, bulk_send, messages); - } - -} - - -/* Entered with workbase readcount. 
*/ -static void send_node_block(ckpool_t *ckp, sdata_t *sdata, const char *enonce1, const char *nonce, - const char *nonce2, const uint32_t ntime32, const uint32_t version_mask, - const int64_t jobid, const double diff, const int64_t client_id, - const char *coinbase, const int cblen, const uchar *data) -{ - if (sdata->node_instances) { - json_t *val = json_object(); - - json_set_string(val, "enonce1", enonce1); - json_set_string(val, "nonce", nonce); - json_set_string(val, "nonce2", nonce2); - json_set_uint32(val, "ntime32", ntime32); - json_set_uint32(val, "version_mask", version_mask); - json_set_int64(val, "jobid", jobid); - json_set_double(val, "diff", diff); - add_remote_blockdata(ckp, val, cblen, coinbase, data); - send_nodes_block(sdata, val, client_id); - json_decref(val); - } -} - -/* Process a block into a message for the generator to submit. Must hold - * workbase readcount */ -static char * -process_block(const workbase_t *wb, const char *coinbase, const int cblen, - const uchar *data, const uchar *hash, uchar *flip32, char *blockhash) -{ - char *gbt_block, varint[12]; - int txns = wb->txns + 1; - char hexcoinbase[1024]; - - flip_32(flip32, hash); - __bin2hex(blockhash, flip32, 32); - - /* Message format: "data" */ - gbt_block = ckzalloc(1024); - __bin2hex(gbt_block, data, 80); - if (txns < 0xfd) { - uint8_t val8 = txns; - - __bin2hex(varint, (const unsigned char *)&val8, 1); - } else if (txns <= 0xffff) { - uint16_t val16 = htole16(txns); - - strcat(gbt_block, "fd"); - __bin2hex(varint, (const unsigned char *)&val16, 2); - } else { - uint32_t val32 = htole32(txns); - - strcat(gbt_block, "fe"); - __bin2hex(varint, (const unsigned char *)&val32, 4); - } - strcat(gbt_block, varint); - __bin2hex(hexcoinbase, coinbase, cblen); - strcat(gbt_block, hexcoinbase); - if (wb->txns) - realloc_strcat(&gbt_block, wb->txn_data); - return gbt_block; -} - -/* Submit block data locally, absorbing and freeing gbt_block */ -static bool local_block_submit(ckpool_t *ckp, 
char *gbt_block, const uchar *flip32, int height) -{ - bool ret = generator_submitblock(ckp, gbt_block); - char heighthash[68] = {}, rhash[68] = {}; - uchar swap256[32]; - - free(gbt_block); - swap_256(swap256, flip32); - __bin2hex(rhash, swap256, 32); - generator_preciousblock(ckp, rhash); - - /* Check failures that may be inconclusive but were submitted via other - * means or accepted due to precious block call. */ - if (!ret) { - /* If the block is accepted locally, it means we may have - * displaced a known block, and are now working on this fork. - * This makes the most sense since if we solve the next block, - * it validates this one as the best chain, orphaning the other - * block. In the case of mainnet, it means we have found a stale - * block and are trying to force ours ahead of the other. In - * a low diff environment we may have successive blocks, and - * this will be the last one solved locally. Trying to optimise - * regtest/testnet will optimise against the mainnet case. */ - if (generator_get_blockhash(ckp, height, heighthash)) { - ret = !strncmp(rhash, heighthash, 64); - LOGWARNING("Hash for forced possibly stale block, height %d confirms block was %s", - height, ret ? 
"ACCEPTED" : "REJECTED"); - } - } - return ret; -} - -static workbase_t *get_workbase(sdata_t *sdata, const int64_t id) -{ - workbase_t *wb; - - ck_wlock(&sdata->workbase_lock); - HASH_FIND_I64(sdata->workbases, &id, wb); - if (wb) - wb->readcount++; - ck_wunlock(&sdata->workbase_lock); - - return wb; -} - -static workbase_t *__find_remote_workbase(sdata_t *sdata, const int64_t id, const int64_t client_id) -{ - int64_t lookup[2] = {id, client_id}; - workbase_t *wb; - - HASH_FIND(hh, sdata->remote_workbases, lookup, sizeof(int64_t) * 2, wb); - return wb; -} - -static workbase_t *get_remote_workbase(sdata_t *sdata, const int64_t id, const int64_t client_id) -{ - workbase_t *wb; - - ck_wlock(&sdata->workbase_lock); - wb = __find_remote_workbase(sdata, id, client_id); - if (wb) { - if (wb->incomplete) - wb = NULL; - else - wb->readcount++; - } - ck_wunlock(&sdata->workbase_lock); - - return wb; -} - -static void put_workbase(sdata_t *sdata, workbase_t *wb) -{ - ck_wlock(&sdata->workbase_lock); - wb->readcount--; - ck_wunlock(&sdata->workbase_lock); -} - -#define put_remote_workbase(sdata, wb) put_workbase(sdata, wb) - -static void block_solve(ckpool_t *ckp, json_t *val); -static void block_reject(json_t *val); - -static void submit_node_block(ckpool_t *ckp, sdata_t *sdata, json_t *val) -{ - char *coinbase = NULL, *enonce1 = NULL, *nonce = NULL, *nonce2 = NULL, *gbt_block, - *coinbasehex, *swaphex; - uchar *enonce1bin = NULL, hash[32], swap[80], flip32[32]; - uint32_t ntime32, version_mask = 0; - char blockhash[68], cdfield[64]; - int enonce1len, cblen; - workbase_t *wb = NULL; - json_t *bval; - double diff; - ts_t ts_now; - int64_t id; - bool ret; - - if (unlikely(!json_get_string(&enonce1, val, "enonce1"))) { - LOGWARNING("Failed to get enonce1 from node method block"); - goto out; - } - if (unlikely(!json_get_string(&nonce, val, "nonce"))) { - LOGWARNING("Failed to get nonce from node method block"); - goto out; - } - if (unlikely(!json_get_string(&nonce2, val, 
"nonce2"))) { - LOGWARNING("Failed to get nonce2 from node method block"); - goto out; - } - if (unlikely(!json_get_uint32(&ntime32, val, "ntime32"))) { - LOGWARNING("Failed to get ntime32 from node method block"); - goto out; - } - if (unlikely(!json_get_int64(&id, val, "jobid"))) { - LOGWARNING("Failed to get jobid from node method block"); - goto out; - } - if (unlikely(!json_get_double(&diff, val, "diff"))) { - LOGWARNING("Failed to get diff from node method block"); - goto out; - } - - if (!json_get_uint32(&version_mask, val, "version_mask")) { - /* No version mask is not fatal, assume it to be zero */ - LOGINFO("No version mask in node method block"); - } - - LOGWARNING("Possible upstream block solve diff %lf !", diff); - - ts_realtime(&ts_now); - sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); - - wb = get_workbase(sdata, id); - if (unlikely(!wb)) { - LOGWARNING("Failed to find workbase with jobid %"PRId64" in node method block", id); - goto out; - } - - /* Get parameters if upstream pool supports them with new format */ - json_get_string(&coinbasehex, val, "coinbasehex"); - json_get_int(&cblen, val, "cblen"); - json_get_string(&swaphex, val, "swaphex"); - if (coinbasehex && cblen && swaphex) { - uchar hash1[32]; - - coinbase = alloca(cblen); - hex2bin(coinbase, coinbasehex, cblen); - hex2bin(swap, swaphex, 80); - sha256(swap, 80, hash1); - sha256(hash1, 32, hash); - } else { - /* Rebuild the old way if we can if the upstream pool is using - * the old format only */ - enonce1len = wb->enonce1constlen + wb->enonce1varlen; - enonce1bin = alloca(enonce1len); - hex2bin(enonce1bin, enonce1, enonce1len); - coinbase = alloca(wb->coinb1len + wb->enonce1constlen + wb->enonce1varlen + wb->enonce2varlen + wb->coinb2len); - /* Fill in the hashes */ - share_diff(coinbase, enonce1bin, wb, nonce2, ntime32, version_mask, nonce, hash, swap, &cblen); - } - - /* Now we have enough to assemble a block */ - gbt_block = process_block(wb, coinbase, cblen, swap, hash, 
flip32, blockhash); - ret = local_block_submit(ckp, gbt_block, flip32, wb->height); - - JSON_CPACK(bval, "{si,ss,ss,sI,ss,ss,si,ss,sI,sf,ss,ss,ss,ss}", - "height", wb->height, - "blockhash", blockhash, - "confirmed", "n", - "workinfoid", wb->id, - "enonce1", enonce1, - "nonce2", nonce2, - "version_mask", version_mask, - "nonce", nonce, - "reward", wb->coinbasevalue, - "diff", diff, - "createdate", cdfield, - "createby", "code", - "createcode", __func__, - "createinet", ckp->serverurl[0]); - put_workbase(sdata, wb); - - if (ret) - block_solve(ckp, bval); - else - block_reject(bval); - - json_decref(bval); -out: - free(nonce2); - free(nonce); - free(enonce1); -} - -static void update_base(sdata_t *sdata, const int prio) -{ - int *uprio; - - /* All uses of block_update are serialised so if we have more - * update_base calls waiting there is no point servicing them unless - * they are high priority. */ - if (prio < GEN_PRIORITY) { - /* Don't queue another routine update if one is already in - * progress. */ - if (cksem_trywait(&sdata->update_sem)) { - LOGINFO("Skipped lowprio update base"); - return; - } - } else - cksem_wait(&sdata->update_sem); - - uprio = ckalloc(sizeof(int)); - *uprio = prio; - ckmsgq_add(sdata->updateq, uprio); -} - -/* Instead of removing the client instance, we add it to a list of recycled - * clients allowing us to reuse it instead of callocing a new one */ -static void __kill_instance(sdata_t *sdata, stratum_instance_t *client) -{ - if (client->proxy) { - client->proxy->bound_clients--; - client->proxy->parent->combined_clients--; - } - free(client->workername); - free(client->password); - free(client->useragent); - memset(client, 0, sizeof(stratum_instance_t)); - DL_APPEND2(sdata->recycled_instances, client, recycled_prev, recycled_next); -} - -/* Called with instance_lock held. Note stats.users is protected by - * instance lock to avoid recursive locking. 
*/ -static void __inc_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) -{ - sdata->stats.workers++; - if (!user->workers++) - sdata->stats.users++; - worker->instance_count++; -} - -static void __dec_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) -{ - sdata->stats.workers--; - if (!--user->workers) - sdata->stats.users--; - worker->instance_count--; -} - -static void __disconnect_session(sdata_t *sdata, const stratum_instance_t *client) -{ - time_t now_t = time(NULL); - session_t *session, *tmp; - - /* Opportunity to age old sessions */ - HASH_ITER(hh, sdata->disconnected_sessions, session, tmp) { - if (now_t - session->added > 600) { - HASH_DEL(sdata->disconnected_sessions, session); - dealloc(session); - sdata->stats.disconnected--; - } - } - - if (!client->enonce1_64 || !client->user_instance || !client->authorised) - return; - HASH_FIND_INT(sdata->disconnected_sessions, &client->session_id, session); - if (session) - return; - session = ckalloc(sizeof(session_t)); - session->enonce1_64 = client->enonce1_64; - session->session_id = client->session_id; - session->client_id = client->id; - session->userid = client->user_id; - session->added = now_t; - strcpy(session->address, client->address); - HASH_ADD_INT(sdata->disconnected_sessions, session_id, session); - sdata->stats.disconnected++; - sdata->disconnected_generated++; -} - -/* Removes a client instance we know is on the stratum_instances list and from - * the user client list if it's been placed on it */ -static void __del_client(sdata_t *sdata, stratum_instance_t *client) -{ - user_instance_t *user = client->user_instance; - - HASH_DEL(sdata->stratum_instances, client); - if (user) { - DL_DELETE2(user->clients, client, user_prev, user_next ); - __dec_worker(sdata, user, client->worker_instance); - } -} - -static void connector_drop_client(ckpool_t *ckp, const int64_t id) -{ - char buf[256]; - - LOGDEBUG("Stratifier requesting connector drop client %"PRId64, 
id); - snprintf(buf, 255, "dropclient=%"PRId64, id); - send_proc(ckp->connector, buf); -} - -static void drop_allclients(ckpool_t *ckp) -{ - stratum_instance_t *client, *tmp; - sdata_t *sdata = ckp->sdata; - int kills = 0; - - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - int64_t client_id = client->id; - - if (!client->ref) { - __del_client(sdata, client); - __kill_instance(sdata, client); - } else - client->dropped = true; - kills++; - connector_drop_client(ckp, client_id); - } - sdata->stats.users = sdata->stats.workers = 0; - ck_wunlock(&sdata->instance_lock); - - if (kills) - LOGNOTICE("Dropped %d instances for dropall request", kills); -} - -/* Copy only the relevant parts of the master sdata for each subproxy */ -static sdata_t *duplicate_sdata(const sdata_t *sdata) -{ - sdata_t *dsdata = ckzalloc(sizeof(sdata_t)); - - dsdata->ckp = sdata->ckp; - - /* Copy the transaction binaries for workbase creation */ - memcpy(dsdata->txnbin, sdata->txnbin, 40); - memcpy(dsdata->dontxnbin, sdata->dontxnbin, 40); - - /* Use the same work queues for all subproxies */ - dsdata->ssends = sdata->ssends; - dsdata->srecvs = sdata->srecvs; - dsdata->sshareq = sdata->sshareq; - dsdata->sauthq = sdata->sauthq; - dsdata->stxnq = sdata->stxnq; - - /* Give the sbuproxy its own workbase list and lock */ - cklock_init(&dsdata->workbase_lock); - cksem_init(&dsdata->update_sem); - cksem_post(&dsdata->update_sem); - return dsdata; -} - -static int64_t prio_sort(proxy_t *a, proxy_t *b) -{ - return (a->priority - b->priority); -} - -/* Masked increment */ -static int64_t masked_inc(int64_t value, int64_t mask) -{ - value &= ~mask; - value++; - value |= mask; - return value; -} - -/* Priority values can be sparse, they do not need to be sequential */ -static void __set_proxy_prio(sdata_t *sdata, proxy_t *proxy, int64_t priority) -{ - proxy_t *tmpa, *tmpb, *exists = NULL; - int64_t mask, next_prio = 0; - - /* Encode the userid as the high bits in 
priority */ - mask = proxy->userid; - mask <<= 32; - priority |= mask; - - /* See if the priority is already in use */ - HASH_ITER(hh, sdata->proxies, tmpa, tmpb) { - if (tmpa->priority > priority) - break; - if (tmpa->priority == priority) { - exists = tmpa; - next_prio = masked_inc(priority, mask); - break; - } - } - /* See if we need to push the priority of everything after exists up */ - HASH_ITER(hh, exists, tmpa, tmpb) { - if (tmpa->priority > next_prio) - break; - tmpa->priority = masked_inc(tmpa->priority, mask); - next_prio++; - } - proxy->priority = priority; - HASH_SORT(sdata->proxies, prio_sort); -} - -static proxy_t *__generate_proxy(sdata_t *sdata, const int id) -{ - proxy_t *proxy = ckzalloc(sizeof(proxy_t)); - - proxy->parent = proxy; - proxy->id = id; - proxy->sdata = duplicate_sdata(sdata); - proxy->sdata->subproxy = proxy; - proxy->sdata->verbose = true; - /* subid == 0 on parent proxy */ - HASH_ADD(sh, proxy->subproxies, subid, sizeof(int), proxy); - proxy->subproxy_count++; - HASH_ADD_INT(sdata->proxies, id, proxy); - /* Set the initial priority to impossibly high initially as the userid - * has yet to be inherited and the priority should be set only after - * all the proxy details are finalised. 
*/ - proxy->priority = 0x00FFFFFFFFFFFFFF; - HASH_SORT(sdata->proxies, prio_sort); - sdata->proxy_count++; - return proxy; -} - -static proxy_t *__generate_subproxy(sdata_t *sdata, proxy_t *proxy, const int subid) -{ - proxy_t *subproxy = ckzalloc(sizeof(proxy_t)); - - subproxy->parent = proxy; - subproxy->id = proxy->id; - subproxy->subid = subid; - HASH_ADD(sh, proxy->subproxies, subid, sizeof(int), subproxy); - proxy->subproxy_count++; - subproxy->sdata = duplicate_sdata(sdata); - subproxy->sdata->subproxy = subproxy; - return subproxy; -} - -static proxy_t *__existing_proxy(const sdata_t *sdata, const int id) -{ - proxy_t *proxy; - - HASH_FIND_INT(sdata->proxies, &id, proxy); - return proxy; -} - -static proxy_t *existing_proxy(sdata_t *sdata, const int id) -{ - proxy_t *proxy; - - mutex_lock(&sdata->proxy_lock); - proxy = __existing_proxy(sdata, id); - mutex_unlock(&sdata->proxy_lock); - - return proxy; -} - -/* Find proxy by id number, generate one if none exist yet by that id */ -static proxy_t *__proxy_by_id(sdata_t *sdata, const int id) -{ - proxy_t *proxy = __existing_proxy(sdata, id); - - if (unlikely(!proxy)) { - proxy = __generate_proxy(sdata, id); - LOGNOTICE("Stratifier added new proxy %d", id); - } - - return proxy; -} - -static proxy_t *__existing_subproxy(proxy_t *proxy, const int subid) -{ - proxy_t *subproxy; - - HASH_FIND(sh, proxy->subproxies, &subid, sizeof(int), subproxy); - return subproxy; -} - -static proxy_t *__subproxy_by_id(sdata_t *sdata, proxy_t *proxy, const int subid) -{ - proxy_t *subproxy = __existing_subproxy(proxy, subid); - - if (!subproxy) { - subproxy = __generate_subproxy(sdata, proxy, subid); - LOGINFO("Stratifier added new subproxy %d:%d", proxy->id, subid); - } - return subproxy; -} - -static proxy_t *subproxy_by_id(sdata_t *sdata, const int id, const int subid) -{ - proxy_t *proxy, *subproxy; - - mutex_lock(&sdata->proxy_lock); - proxy = __proxy_by_id(sdata, id); - subproxy = __subproxy_by_id(sdata, proxy, subid); - 
mutex_unlock(&sdata->proxy_lock); - - return subproxy; -} - -static proxy_t *existing_subproxy(sdata_t *sdata, const int id, const int subid) -{ - proxy_t *proxy, *subproxy = NULL; - - mutex_lock(&sdata->proxy_lock); - proxy = __existing_proxy(sdata, id); - if (proxy) - subproxy = __existing_subproxy(proxy, subid); - mutex_unlock(&sdata->proxy_lock); - - return subproxy; -} - -static void check_userproxies(sdata_t *sdata, proxy_t *proxy, const int userid); - -static void set_proxy_prio(sdata_t *sdata, proxy_t *proxy, const int priority) -{ - mutex_lock(&sdata->proxy_lock); - __set_proxy_prio(sdata, proxy, priority); - mutex_unlock(&sdata->proxy_lock); - - if (!proxy->global) - check_userproxies(sdata, proxy, proxy->userid); -} - -/* Set proxy to the current proxy and calculate how much headroom it has */ -static int64_t current_headroom(sdata_t *sdata, proxy_t **proxy) -{ - proxy_t *subproxy, *tmp; - int64_t headroom = 0; - - mutex_lock(&sdata->proxy_lock); - *proxy = sdata->proxy; - if (!*proxy) - goto out_unlock; - HASH_ITER(sh, (*proxy)->subproxies, subproxy, tmp) { - if (subproxy->dead) - continue; - headroom += subproxy->max_clients - subproxy->clients; - } -out_unlock: - mutex_unlock(&sdata->proxy_lock); - - return headroom; -} - -/* Returns the headroom available for more clients of the best alive user proxy - * for userid. 
*/ -static int64_t best_userproxy_headroom(sdata_t *sdata, const int userid) -{ - proxy_t *proxy, *subproxy, *tmp, *subtmp; - int64_t headroom = 0; - - mutex_lock(&sdata->proxy_lock); - HASH_ITER(hh, sdata->proxies, proxy, tmp) { - bool alive = false; - - if (proxy->userid < userid) - continue; - if (proxy->userid > userid) - break; - HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { - if (subproxy->dead) - continue; - alive = true; - headroom += subproxy->max_clients - subproxy->clients; - } - /* Proxies are ordered by priority so first available will be - * the best priority */ - if (alive) - break; - } - mutex_unlock(&sdata->proxy_lock); - - return headroom; -} - -static void reconnect_client(sdata_t *sdata, stratum_instance_t *client); - -static void generator_recruit(ckpool_t *ckp, const int proxyid, const int recruits) -{ - char buf[256]; - - sprintf(buf, "recruit=%d:%d", proxyid, recruits); - LOGINFO("Stratifer requesting %d more subproxies of proxy %d from generator", - recruits, proxyid); - send_proc(ckp->generator,buf); -} - -/* Find how much headroom we have and connect up to that many clients that are - * not currently on this pool, recruiting more slots to switch more clients - * later on lazily. Only reconnect clients bound to global proxies. */ -static void reconnect_global_clients(sdata_t *sdata) -{ - stratum_instance_t *client, *tmpclient; - int reconnects = 0; - int64_t headroom; - proxy_t *proxy; - - headroom = current_headroom(sdata, &proxy); - if (!proxy) - return; - - ck_rlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmpclient) { - if (client->dropped) - continue; - if (!client->authorised) - continue; - /* Is this client bound to a dead proxy? 
*/ - if (!client->reconnect) { - /* This client is bound to a user proxy */ - if (client->proxy->userid) - continue; - if (client->proxyid == proxy->id) - continue; - } - if (headroom-- < 1) - continue; - reconnects++; - reconnect_client(sdata, client); - } - ck_runlock(&sdata->instance_lock); - - if (reconnects) { - LOGINFO("%d clients flagged for reconnect to global proxy %d", - reconnects, proxy->id); - } - if (headroom < 0) - generator_recruit(sdata->ckp, proxy->id, -headroom); -} - -static bool __subproxies_alive(proxy_t *proxy) -{ - proxy_t *subproxy, *tmp; - bool alive = false; - - HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { - if (!subproxy->dead) { - alive = true; - break; - } - } - return alive; -} - -/* Iterate over the current global proxy list and see if the current one is - * the highest priority alive one. Proxies are sorted by priority so the first - * available will be highest priority. Uses ckp sdata */ -static void check_bestproxy(sdata_t *sdata) -{ - proxy_t *proxy, *tmp; - int changed_id = -1; - - mutex_lock(&sdata->proxy_lock); - if (sdata->proxy && !__subproxies_alive(sdata->proxy)) - sdata->proxy = NULL; - HASH_ITER(hh, sdata->proxies, proxy, tmp) { - if (!__subproxies_alive(proxy)) - continue; - if (!proxy->global) - break; - if (proxy != sdata->proxy) { - sdata->proxy = proxy; - changed_id = proxy->id; - } - break; - } - mutex_unlock(&sdata->proxy_lock); - - if (changed_id != -1) - LOGNOTICE("Stratifier setting active proxy to %d", changed_id); -} - -static proxy_t *best_proxy(sdata_t *sdata) -{ - proxy_t *proxy; - - mutex_lock(&sdata->proxy_lock); - proxy = sdata->proxy; - mutex_unlock(&sdata->proxy_lock); - - return proxy; -} - -static void check_globalproxies(sdata_t *sdata, proxy_t *proxy) -{ - check_bestproxy(sdata); - if (proxy->parent == best_proxy(sdata)->parent) - reconnect_global_clients(sdata); -} - -static void check_proxy(sdata_t *sdata, proxy_t *proxy) -{ - if (proxy->global) - check_globalproxies(sdata, proxy); - else - 
check_userproxies(sdata, proxy, proxy->userid); -} - -static void dead_proxyid(sdata_t *sdata, const int id, const int subid, const bool replaced, const bool deleted) -{ - stratum_instance_t *client, *tmp; - int reconnects = 0, proxyid = 0; - int64_t headroom; - proxy_t *proxy; - - proxy = existing_subproxy(sdata, id, subid); - if (proxy) { - proxy->dead = true; - proxy->deleted = deleted; - if (!replaced && proxy->global) - check_bestproxy(sdata); - } - LOGINFO("Stratifier dropping clients from proxy %d:%d", id, subid); - headroom = current_headroom(sdata, &proxy); - if (proxy) - proxyid = proxy->id; - - ck_rlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - if (client->proxyid != id || client->subproxyid != subid) - continue; - /* Clients could remain connected to a dead connection here - * but should be picked up when we recruit enough slots after - * another notify. */ - if (headroom-- < 1) { - client->reconnect = true; - continue; - } - reconnects++; - reconnect_client(sdata, client); - } - ck_runlock(&sdata->instance_lock); - - if (reconnects) { - LOGINFO("%d clients flagged to reconnect from dead proxy %d:%d", reconnects, - id, subid); - } - /* When a proxy dies, recruit more of the global proxies for them to - * fail over to in case user proxies are unavailable. 
*/ - if (headroom < 0) - generator_recruit(sdata->ckp, proxyid, -headroom); -} - -static void update_subscribe(ckpool_t *ckp, const char *cmd) -{ - sdata_t *sdata = ckp->sdata, *dsdata; - int id = 0, subid = 0, userid = 0; - proxy_t *proxy, *old = NULL; - const char *buf; - bool global; - json_t *val; - - if (unlikely(strlen(cmd) < 11)) { - LOGWARNING("Received zero length string for subscribe in update_subscribe"); - return; - } - buf = cmd + 10; - LOGDEBUG("Update subscribe: %s", buf); - val = json_loads(buf, 0, NULL); - if (unlikely(!val)) { - LOGWARNING("Failed to json decode subscribe response in update_subscribe %s", buf); - return; - } - if (unlikely(!json_get_int(&id, val, "proxy"))) { - LOGWARNING("Failed to json decode proxy value in update_subscribe %s", buf); - return; - } - if (unlikely(!json_get_int(&subid, val, "subproxy"))) { - LOGWARNING("Failed to json decode subproxy value in update_subscribe %s", buf); - return; - } - if (unlikely(!json_get_bool(&global, val, "global"))) { - LOGWARNING("Failed to json decode global value in update_subscribe %s", buf); - return; - } - if (!global) { - if (unlikely(!json_get_int(&userid, val, "userid"))) { - LOGWARNING("Failed to json decode userid value in update_subscribe %s", buf); - return; - } - } - - if (!subid) - LOGNOTICE("Got updated subscribe for proxy %d", id); - else - LOGINFO("Got updated subscribe for proxy %d:%d", id, subid); - - /* Is this a replacement for an existing proxy id? 
*/ - old = existing_subproxy(sdata, id, subid); - if (old) { - dead_proxyid(sdata, id, subid, true, false); - proxy = old; - proxy->dead = false; - } else /* This is where all new proxies are created */ - proxy = subproxy_by_id(sdata, id, subid); - proxy->global = global; - proxy->userid = userid; - proxy->subscribed = true; - proxy->diff = ckp->startdiff; - memset(proxy->baseurl, 0, 128); - memset(proxy->url, 0, 128); - memset(proxy->auth, 0, 128); - memset(proxy->pass, 0, 128); - strncpy(proxy->baseurl, json_string_value(json_object_get(val, "baseurl")), 127); - strncpy(proxy->url, json_string_value(json_object_get(val, "url")), 127); - strncpy(proxy->auth, json_string_value(json_object_get(val, "auth")), 127); - strncpy(proxy->pass, json_string_value(json_object_get(val, "pass")), 127); - - dsdata = proxy->sdata; - - ck_wlock(&dsdata->workbase_lock); - /* Length is checked by generator */ - strcpy(proxy->enonce1, json_string_value(json_object_get(val, "enonce1"))); - proxy->enonce1constlen = strlen(proxy->enonce1) / 2; - hex2bin(proxy->enonce1bin, proxy->enonce1, proxy->enonce1constlen); - proxy->nonce2len = json_integer_value(json_object_get(val, "nonce2len")); - if (ckp->nonce2length) { - proxy->enonce1varlen = proxy->nonce2len - ckp->nonce2length; - if (proxy->enonce1varlen < 0) - proxy->enonce1varlen = 0; - } else if (proxy->nonce2len > 7) - proxy->enonce1varlen = 4; - else if (proxy->nonce2len > 5) - proxy->enonce1varlen = 2; - else if (proxy->nonce2len > 3) - proxy->enonce1varlen = 1; - else - proxy->enonce1varlen = 0; - proxy->enonce2varlen = proxy->nonce2len - proxy->enonce1varlen; - proxy->max_clients = 1ll << (proxy->enonce1varlen * 8); - proxy->clients = 0; - ck_wunlock(&dsdata->workbase_lock); - - if (subid) { - LOGINFO("Upstream pool %s %d:%d extranonce2 length %d, max proxy clients %"PRId64, - proxy->url, id, subid, proxy->nonce2len, proxy->max_clients); - } else { - LOGNOTICE("Upstream pool %s %d extranonce2 length %d, max proxy clients %"PRId64, 
- proxy->url, id, proxy->nonce2len, proxy->max_clients); - } - if (ckp->nonce2length && proxy->enonce2varlen != ckp->nonce2length) - LOGWARNING("Only able to set nonce2len %d of requested %d on proxy %d:%d", - proxy->enonce2varlen, ckp->nonce2length, id, subid); - json_decref(val); - - /* Set the priority on a new proxy now that we have all the fields - * filled in to push it to its correct priority position in the - * hashlist. */ - if (!old) - set_proxy_prio(sdata, proxy, id); - - check_proxy(sdata, proxy); -} - -/* Find the highest priority alive proxy belonging to userid and recruit extra - * subproxies. */ -static void recruit_best_userproxy(sdata_t *sdata, const int userid, const int recruits) -{ - proxy_t *proxy, *subproxy, *tmp, *subtmp; - int id = -1; - - mutex_lock(&sdata->proxy_lock); - HASH_ITER(hh, sdata->proxies, proxy, tmp) { - if (proxy->userid < userid) - continue; - if (proxy->userid > userid) - break; - HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { - if (subproxy->dead) - continue; - id = proxy->id; - } - } - mutex_unlock(&sdata->proxy_lock); - - if (id != -1) - generator_recruit(sdata->ckp, id, recruits); -} - -/* Check how much headroom the userid proxies have and reconnect any clients - * that are not bound to it that should be */ -static void check_userproxies(sdata_t *sdata, proxy_t *proxy, const int userid) -{ - int64_t headroom = best_userproxy_headroom(sdata, userid); - stratum_instance_t *client, *tmpclient; - int reconnects = 0; - - ck_rlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmpclient) { - if (client->dropped) - continue; - if (!client->authorised) - continue; - if (client->user_id != userid) - continue; - /* Is the client already bound to a proxy of its own userid of - * a higher priority than this one. 
*/ - if (client->proxy->userid == userid && - client->proxy->parent->priority <= proxy->parent->priority) - continue; - if (headroom-- < 1) - continue; - reconnects++; - reconnect_client(sdata, client); - } - ck_runlock(&sdata->instance_lock); - - if (reconnects) { - LOGINFO("%d clients flagged for reconnect to user %d proxies", - reconnects, userid); - } - if (headroom < 0) - recruit_best_userproxy(sdata, userid, -headroom); -} - -static void update_notify(ckpool_t *ckp, const char *cmd) -{ - sdata_t *sdata = ckp->sdata, *dsdata; - bool new_block = false, clean; - int i, id = 0, subid = 0; - char header[272]; - const char *buf; - proxy_t *proxy; - workbase_t *wb; - json_t *val; - - if (unlikely(strlen(cmd) < 8)) { - LOGWARNING("Zero length string passed to update_notify"); - return; - } - buf = cmd + 7; /* "notify=" */ - LOGDEBUG("Update notify: %s", buf); - - val = json_loads(buf, 0, NULL); - if (unlikely(!val)) { - LOGWARNING("Failed to json decode in update_notify"); - return; - } - json_get_int(&id, val, "proxy"); - json_get_int(&subid, val, "subproxy"); - proxy = existing_subproxy(sdata, id, subid); - if (unlikely(!proxy || !proxy->subscribed)) { - LOGINFO("No valid proxy %d:%d subscription to update notify yet", id, subid); - goto out; - } - LOGINFO("Got updated notify for proxy %d:%d", id, subid); - - wb = ckzalloc(sizeof(workbase_t)); - wb->ckp = ckp; - wb->proxy = true; - - json_get_int64(&wb->id, val, "jobid"); - json_strcpy(wb->prevhash, val, "prevhash"); - json_intcpy(&wb->coinb1len, val, "coinb1len"); - wb->coinb1bin = ckalloc(wb->coinb1len); - wb->coinb1 = ckalloc(wb->coinb1len * 2 + 1); - json_strcpy(wb->coinb1, val, "coinbase1"); - hex2bin(wb->coinb1bin, wb->coinb1, wb->coinb1len); - wb->height = get_sernumber(wb->coinb1bin + 42); - json_strdup(&wb->coinb2, val, "coinbase2"); - wb->coinb2len = strlen(wb->coinb2) / 2; - wb->coinb2bin = ckalloc(wb->coinb2len); - hex2bin(wb->coinb2bin, wb->coinb2, wb->coinb2len); - wb->merkle_array = 
json_object_dup(val, "merklehash"); - wb->merkles = json_array_size(wb->merkle_array); - for (i = 0; i < wb->merkles; i++) { - strcpy(&wb->merklehash[i][0], json_string_value(json_array_get(wb->merkle_array, i))); - hex2bin(&wb->merklebin[i][0], &wb->merklehash[i][0], 32); - } - json_strcpy(wb->bbversion, val, "bbversion"); - json_strcpy(wb->nbit, val, "nbit"); - json_strcpy(wb->ntime, val, "ntime"); - sscanf(wb->ntime, "%x", &wb->ntime32); - clean = json_is_true(json_object_get(val, "clean")); - ts_realtime(&wb->gentime); - snprintf(header, 270, "%s%s%s%s%s%s%s", - wb->bbversion, wb->prevhash, - "0000000000000000000000000000000000000000000000000000000000000000", - wb->ntime, wb->nbit, - "00000000", /* nonce */ - workpadding); - header[224] = 0; - LOGDEBUG("Header: %s", header); - hex2bin(wb->headerbin, header, 112); - wb->txn_hashes = ckzalloc(1); - - dsdata = proxy->sdata; - - ck_rlock(&dsdata->workbase_lock); - strcpy(wb->enonce1const, proxy->enonce1); - wb->enonce1constlen = proxy->enonce1constlen; - memcpy(wb->enonce1constbin, proxy->enonce1bin, wb->enonce1constlen); - wb->enonce1varlen = proxy->enonce1varlen; - wb->enonce2varlen = proxy->enonce2varlen; - wb->diff = proxy->diff; - ck_runlock(&dsdata->workbase_lock); - - add_base(ckp, dsdata, wb, &new_block); - if (new_block) { - if (subid) - LOGINFO("Block hash on proxy %d:%d changed to %s", id, subid, dsdata->lastswaphash); - else - LOGNOTICE("Block hash on proxy %d changed to %s", id, dsdata->lastswaphash); - } - - check_proxy(sdata, proxy); - clean |= new_block; - LOGINFO("Proxy %d:%d broadcast updated stratum notify with%s clean", id, - subid, clean ? 
"" : "out"); - stratum_broadcast_update(dsdata, wb, clean); -out: - json_decref(val); -} - -static void stratum_send_diff(sdata_t *sdata, const stratum_instance_t *client); - -static void update_diff(ckpool_t *ckp, const char *cmd) -{ - sdata_t *sdata = ckp->sdata, *dsdata; - stratum_instance_t *client, *tmp; - double old_diff, diff; - int id = 0, subid = 0; - const char *buf; - proxy_t *proxy; - json_t *val; - - if (unlikely(strlen(cmd) < 6)) { - LOGWARNING("Zero length string passed to update_diff"); - return; - } - buf = cmd + 5; /* "diff=" */ - LOGDEBUG("Update diff: %s", buf); - - val = json_loads(buf, 0, NULL); - if (unlikely(!val)) { - LOGWARNING("Failed to json decode in update_diff"); - return; - } - json_get_int(&id, val, "proxy"); - json_get_int(&subid, val, "subproxy"); - json_dblcpy(&diff, val, "diff"); - json_decref(val); - - LOGINFO("Got updated diff for proxy %d:%d", id, subid); - proxy = existing_subproxy(sdata, id, subid); - if (!proxy) { - LOGINFO("No existing subproxy %d:%d to update diff", id, subid); - return; - } - - /* Support fractional difficulty values. Set minimum to a very small - * positive value to prevent division by zero. */ - if (unlikely(diff < 0.000001)) - diff = 0.000001; - - dsdata = proxy->sdata; - - if (unlikely(!dsdata->current_workbase)) { - LOGINFO("No current workbase to update diff yet"); - return; - } - - ck_wlock(&dsdata->workbase_lock); - old_diff = proxy->diff; - dsdata->current_workbase->diff = proxy->diff = diff; - ck_wunlock(&dsdata->workbase_lock); - - if (old_diff < diff) - return; - - /* If the diff has dropped, iterate over all the clients and check - * they're at or below the new diff, and update it if not. 
*/ - ck_rlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - if (client->proxyid != id) - continue; - if (client->subproxyid != subid) - continue; - if (client->diff > diff) { - client->diff = diff; - stratum_send_diff(sdata, client); - } - } - ck_runlock(&sdata->instance_lock); -} - -#if 0 -static void generator_drop_proxy(ckpool_t *ckp, const int64_t id, const int subid) -{ - char msg[256]; - - sprintf(msg, "dropproxy=%ld:%d", id, subid); - send_proc(ckp->generator,msg); -} -#endif - -static void free_proxy(ckpool_t *ckp, proxy_t *proxy) -{ - sdata_t *dsdata = proxy->sdata; - - /* Delete any shares in the proxy's hashtable. */ - if (dsdata) { - share_t *share, *tmpshare; - workbase_t *wb, *tmpwb; - - mutex_lock(&dsdata->share_lock); - HASH_ITER(hh, dsdata->shares, share, tmpshare) { - HASH_DEL(dsdata->shares, share); - dealloc(share); - } - mutex_unlock(&dsdata->share_lock); - - /* Do we need to check readcount here if freeing the proxy? */ - ck_wlock(&dsdata->workbase_lock); - HASH_ITER(hh, dsdata->workbases, wb, tmpwb) { - HASH_DEL(dsdata->workbases, wb); - clear_workbase(ckp, wb); - } - ck_wunlock(&dsdata->workbase_lock); - } - - free(proxy->sdata); - free(proxy); -} - -/* Remove subproxies that are flagged dead. Then see if there - * are any retired proxies that no longer have any other subproxies and reap - * those. 
*/ -static void reap_proxies(ckpool_t *ckp, sdata_t *sdata) -{ - proxy_t *proxy, *proxytmp, *subproxy, *subtmp; - int dead = 0; - - if (!ckp->proxy) - return; - - mutex_lock(&sdata->proxy_lock); - HASH_ITER(hh, sdata->proxies, proxy, proxytmp) { - HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { - if (!subproxy->bound_clients && !subproxy->dead) { - /* Reset the counter to reuse this proxy */ - subproxy->clients = 0; - continue; - } - if (proxy == subproxy) - continue; - if (subproxy->bound_clients) - continue; - if (!subproxy->dead) - continue; - if (unlikely(!subproxy->subid)) { - LOGWARNING("Unexepectedly found proxy %d:%d as subproxy of %d:%d", - subproxy->id, subproxy->subid, proxy->id, proxy->subid); - continue; - } - if (unlikely(subproxy == sdata->proxy)) { - LOGWARNING("Unexepectedly found proxy %d:%d as current", - subproxy->id, subproxy->subid); - continue; - } - dead++; - HASH_DELETE(sh, proxy->subproxies, subproxy); - proxy->subproxy_count--; - free_proxy(ckp, subproxy); - } - /* Should we reap the parent proxy too?*/ - if (!proxy->deleted || proxy->subproxy_count > 1 || proxy->bound_clients) - continue; - HASH_DELETE(sh, proxy->subproxies, proxy); - HASH_DELETE(hh, sdata->proxies, proxy); - free_proxy(ckp, proxy); - } - mutex_unlock(&sdata->proxy_lock); - - if (dead) - LOGINFO("Stratifier discarded %d dead proxies", dead); -} - -/* Enter with instance_lock held */ -static stratum_instance_t *__instance_by_id(sdata_t *sdata, const int64_t id) -{ - stratum_instance_t *client; - - HASH_FIND_I64(sdata->stratum_instances, &id, client); - return client; -} - -/* Increase the reference count of instance */ -static void __inc_instance_ref(stratum_instance_t *client) -{ - client->ref++; -} - -/* Find an __instance_by_id and increase its reference count allowing us to - * use this instance outside of instance_lock without fear of it being - * dereferenced. Does not return dropped clients still on the list. 
*/ -static inline stratum_instance_t *ref_instance_by_id(sdata_t *sdata, const int64_t id) -{ - stratum_instance_t *client; - - ck_wlock(&sdata->instance_lock); - client = __instance_by_id(sdata, id); - if (client) { - if (unlikely(client->dropped)) - client = NULL; - else - __inc_instance_ref(client); - } - ck_wunlock(&sdata->instance_lock); - - return client; -} - -static void __drop_client(sdata_t *sdata, stratum_instance_t *client, bool lazily, char **msg) -{ - user_instance_t *user = client->user_instance; - - if (unlikely(client->node)) - DL_DELETE2(sdata->node_instances, client, node_prev, node_next); - else if (unlikely(client->trusted)) - DL_DELETE2(sdata->remote_instances, client, remote_prev, remote_next); - - if (client->workername) { - if (user) { - /* No message anywhere if throttled, too much flood and - * these only can be LOGNOTICE messages. - */ - if (!user->throttled) { - ASPRINTF(msg, "Dropped client %s %s user %s worker %s %s", - client->identity, client->address, - user->username, client->workername, lazily ? "lazily" : ""); - } - } else { - ASPRINTF(msg, "Dropped client %s %s no user worker %s %s", - client->identity, client->address, client->workername, - lazily ? "lazily" : ""); - } - } else { - /* Workerless client. Too noisy to log them all */ - } - __del_client(sdata, client); - __kill_instance(sdata, client); -} - -static int __dec_instance_ref(stratum_instance_t *client) -{ - return --client->ref; -} - -/* Decrease the reference count of instance. */ -static void _dec_instance_ref(sdata_t *sdata, stratum_instance_t *client, const char *file, - const char *func, const int line) -{ - char_entry_t *entries = NULL; - bool dropped = false; - char *msg = NULL; - int ref; - - ck_wlock(&sdata->instance_lock); - ref = __dec_instance_ref(client); - /* See if there are any instances that were dropped that could not be - * moved due to holding a reference and drop them now. 
*/ - if (unlikely(client->dropped && !ref)) { - dropped = true; - __drop_client(sdata, client, true, &msg); - if (msg) - add_msg_entry(&entries, &msg); - } - ck_wunlock(&sdata->instance_lock); - - if (entries) - notice_msg_entries(&entries); - /* This should never happen */ - if (unlikely(ref < 0)) - LOGERR("Instance ref count dropped below zero from %s %s:%d", file, func, line); - - if (dropped) - reap_proxies(sdata->ckp, sdata); -} - -#define dec_instance_ref(sdata, instance) _dec_instance_ref(sdata, instance, __FILE__, __func__, __LINE__) - -/* If we have a no longer used stratum instance in the recycled linked list, - * use that, otherwise calloc a fresh one. */ -static stratum_instance_t *__recruit_stratum_instance(sdata_t *sdata) -{ - stratum_instance_t *client = sdata->recycled_instances; - - if (client) - DL_DELETE2(sdata->recycled_instances, client, recycled_prev, recycled_next); - else { - client = ckzalloc(sizeof(stratum_instance_t)); - sdata->stratum_generated++; - } - return client; -} - -/* Enter with write instance_lock held, drops and grabs it again */ -static stratum_instance_t *__stratum_add_instance(ckpool_t *ckp, int64_t id, const char *address, - int server) -{ - sdata_t *sdata = ckp->sdata; - stratum_instance_t *client; - int64_t pass_id; - - client = __recruit_stratum_instance(sdata); - ck_wunlock(&sdata->instance_lock); - - client->start_time = time(NULL); - client->id = id; - client->session_id = ++sdata->session_id; - strcpy(client->address, address); - /* Sanity check to not overflow lookup in ckp->serverurl[] */ - if (server >= ckp->serverurls) - server = 0; - client->server = server; - client->diff = client->old_diff = ckp->startdiff; - if (ckp->server_highdiff && ckp->server_highdiff[server]) { - client->suggest_diff = ckp->highdiff; - if (client->suggest_diff > client->diff) - client->diff = client->old_diff = client->suggest_diff; - } - client->ckp = ckp; - tv_time(&client->ldc); - /* Points to ckp sdata in ckpool mode, but is 
changed later in proxy - * mode . */ - client->sdata = sdata; - if ((pass_id = subclient(id))) { - stratum_instance_t *remote = __instance_by_id(sdata, pass_id); - - id &= 0xffffffffll; - if (remote && remote->node) { - client->latency = remote->latency; - LOGINFO("Client %s inherited node latency of %d", - client->identity, client->latency); - sprintf(client->identity, "node:%"PRId64" subclient:%"PRId64, - pass_id, id); - } else if (remote && remote->trusted) { - sprintf(client->identity, "remote:%"PRId64" subclient:%"PRId64, - pass_id, id); - } else { /* remote->passthrough remaining */ - sprintf(client->identity, "passthrough:%"PRId64" subclient:%"PRId64, - pass_id, id); - } - client->virtualid = connector_newclientid(ckp); - } else { - sprintf(client->identity, "%"PRId64, id); - client->virtualid = id; - } - - ck_wlock(&sdata->instance_lock); - HASH_ADD_I64(sdata->stratum_instances, id, client); - return client; -} - -static uint64_t disconnected_sessionid_exists(sdata_t *sdata, const int session_id, - const int64_t id) -{ - session_t *session; - int64_t old_id = 0; - uint64_t ret = 0; - - ck_wlock(&sdata->instance_lock); - HASH_FIND_INT(sdata->disconnected_sessions, &session_id, session); - if (!session) - goto out_unlock; - HASH_DEL(sdata->disconnected_sessions, session); - sdata->stats.disconnected--; - ret = session->enonce1_64; - old_id = session->client_id; - dealloc(session); -out_unlock: - ck_wunlock(&sdata->instance_lock); - - if (ret) - LOGINFO("Reconnecting old instance %"PRId64" to instance %"PRId64, old_id, id); - return ret; -} - -static inline bool client_active(stratum_instance_t *client) -{ - return (client->authorised && !client->dropped); -} - -static inline bool remote_server(stratum_instance_t *client) -{ - return (client->node || client->passthrough || client->trusted); -} - -/* Ask the connector asynchronously to send us dropclient commands if this - * client no longer exists. 
*/ -static void connector_test_client(ckpool_t *ckp, const int64_t id) -{ - char buf[256]; - - LOGDEBUG("Stratifier requesting connector test client %"PRId64, id); - snprintf(buf, 255, "testclient=%"PRId64, id); - send_proc(ckp->connector, buf); -} - -/* For creating a list of sends without locking that can then be concatenated - * to the stratum_sends list. Minimises locking and avoids taking recursive - * locks. Sends only to sdata bound clients (everyone in ckpool) */ -static void stratum_broadcast(sdata_t *sdata, json_t *val, const int msg_type) -{ - ckpool_t *ckp = sdata->ckp; - sdata_t *ckp_sdata = ckp->sdata; - stratum_instance_t *client, *tmp; - ckmsg_t *bulk_send = NULL; - int messages = 0; - - if (unlikely(!val)) { - LOGERR("Sent null json to stratum_broadcast"); - return; - } - - if (ckp->node) { - json_decref(val); - return; - } - - ck_rlock(&ckp_sdata->instance_lock); - HASH_ITER(hh, ckp_sdata->stratum_instances, client, tmp) { - ckmsg_t *client_msg; - smsg_t *msg; - - if (sdata != ckp_sdata && client->sdata != sdata) - continue; - - if (!client_active(client) || remote_server(client)) - continue; - - /* Only send messages to whitelisted clients */ - if (msg_type == SM_MSG && !client->messages) - continue; - - client_msg = ckalloc(sizeof(ckmsg_t)); - msg = ckzalloc(sizeof(smsg_t)); - if (subclient(client->id)) - json_set_string(val, "node.method", stratum_msgs[msg_type]); - msg->json_msg = json_deep_copy(val); - msg->client_id = client->id; - client_msg->data = msg; - DL_APPEND(bulk_send, client_msg); - messages++; - } - ck_runlock(&ckp_sdata->instance_lock); - - json_decref(val); - - if (likely(bulk_send)) - ssend_bulk_append(sdata, bulk_send, messages); -} - -static void stratum_add_send(sdata_t *sdata, json_t *val, const int64_t client_id, - const int msg_type) -{ - ckpool_t *ckp = sdata->ckp; - int64_t remote_id; - smsg_t *msg; - - if (ckp->node) { - /* Node shouldn't be sending any messages as it only uses the - * stratifier for monitoring 
activity. */ - json_decref(val); - return; - } - - if ((remote_id = subclient(client_id))) { - stratum_instance_t *remote = ref_instance_by_id(sdata, remote_id); - - if (unlikely(!remote)) { - json_decref(val); - return; - } - if (remote->trusted) - json_set_string(val, "method", stratum_msgs[msg_type]); - else /* Both remote->node and remote->passthrough */ - json_set_string(val, "node.method", stratum_msgs[msg_type]); - dec_instance_ref(sdata, remote); - } - LOGDEBUG("Sending stratum message %s", stratum_msgs[msg_type]); - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = val; - msg->client_id = client_id; - if (likely(ckmsgq_add(sdata->ssends, msg))) - return; - json_decref(msg->json_msg); - free(msg); -} - -static void drop_client(ckpool_t *ckp, sdata_t *sdata, const int64_t id) -{ - char_entry_t *entries = NULL; - stratum_instance_t *client; - char *msg = NULL; - - LOGINFO("Stratifier asked to drop client %"PRId64, id); - - ck_wlock(&sdata->instance_lock); - client = __instance_by_id(sdata, id); - if (client && !client->dropped) { - __disconnect_session(sdata, client); - /* If the client is still holding a reference, don't drop them - * now but wait till the reference is dropped */ - if (!client->ref) { - __drop_client(sdata, client, false, &msg); - if (msg) - add_msg_entry(&entries, &msg); - } else - client->dropped = true; - } - ck_wunlock(&sdata->instance_lock); - - if (entries) - notice_msg_entries(&entries); - reap_proxies(ckp, sdata); -} - -static void stratum_broadcast_message(sdata_t *sdata, const char *msg) -{ - json_t *json_msg; - - JSON_CPACK(json_msg, "{sosss[s]}", "id", json_null(), "method", "client.show_message", - "params", msg); - stratum_broadcast(sdata, json_msg, SM_MSG); -} - -/* Send a generic reconnect to all clients without parameters to make them - * reconnect to the same server. 
*/ -static void request_reconnect(sdata_t *sdata, const char *cmd) -{ - char *port = strdupa(cmd), *url = NULL; - stratum_instance_t *client, *tmp; - json_t *json_msg; - - strsep(&port, ":"); - if (port) - url = strsep(&port, ","); - if (url && port) { - JSON_CPACK(json_msg, "{sosss[ssi]}", "id", json_null(), "method", "client.reconnect", - "params", url, port, 0); - } else - JSON_CPACK(json_msg, "{sosss[]}", "id", json_null(), "method", "client.reconnect", - "params"); - stratum_broadcast(sdata, json_msg, SM_RECONNECT); - - /* Tag all existing clients as dropped now so they can be removed - * lazily */ - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - client->dropped = true; - } - ck_wunlock(&sdata->instance_lock); -} - -static void reset_bestshares(sdata_t *sdata) -{ - user_instance_t *user, *tmpuser; - stratum_instance_t *client, *tmp; - - /* Can do this unlocked since it's just zeroing the values */ - sdata->stats.accounted_diff_shares = - sdata->stats.accounted_shares = - sdata->stats.accounted_rejects = 0; - sdata->stats.best_diff = 0; - - ck_rlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - client->best_diff = 0; - } - HASH_ITER(hh, sdata->user_instances, user, tmpuser) { - worker_instance_t *worker; - - user->best_diff = 0; - DL_FOREACH(user->worker_instances, worker) { - worker->best_diff = 0; - } - } - ck_runlock(&sdata->instance_lock); -} - -static user_instance_t *get_user(sdata_t *sdata, const char *username); - -static user_instance_t *user_by_workername(sdata_t *sdata, const char *workername) -{ - char *username = strdupa(workername), *ignore; - user_instance_t *user; - - ignore = username; - strsep(&ignore, "._"); - - /* Find the user first */ - user = get_user(sdata, username); - return user; -} - -static worker_instance_t *get_worker(sdata_t *sdata, user_instance_t *user, const char *workername); - -static json_t *worker_stats(const worker_instance_t *worker) -{ - 
char suffix1[16], suffix5[16], suffix60[16], suffix1440[16], suffix10080[16]; - json_t *val; - double ghs; - - ghs = worker->dsps1 * nonces; - suffix_string(ghs, suffix1, 16, 0); - - ghs = worker->dsps5 * nonces; - suffix_string(ghs, suffix5, 16, 0); - - ghs = worker->dsps60 * nonces; - suffix_string(ghs, suffix60, 16, 0); - - ghs = worker->dsps1440 * nonces; - suffix_string(ghs, suffix1440, 16, 0); - - ghs = worker->dsps10080 * nonces; - suffix_string(ghs, suffix10080, 16, 0); - - JSON_CPACK(val, "{ss,ss,ss,ss,ss}", - "hashrate1m", suffix1, - "hashrate5m", suffix5, - "hashrate1hr", suffix60, - "hashrate1d", suffix1440, - "hashrate7d", suffix10080); - return val; -} - -static json_t *user_stats(const user_instance_t *user) -{ - char suffix1[16], suffix5[16], suffix60[16], suffix1440[16], suffix10080[16]; - json_t *val; - double ghs; - - ghs = user->dsps1 * nonces; - suffix_string(ghs, suffix1, 16, 0); - - ghs = user->dsps5 * nonces; - suffix_string(ghs, suffix5, 16, 0); - - ghs = user->dsps60 * nonces; - suffix_string(ghs, suffix60, 16, 0); - - ghs = user->dsps1440 * nonces; - suffix_string(ghs, suffix1440, 16, 0); - - ghs = user->dsps10080 * nonces; - suffix_string(ghs, suffix10080, 16, 0); - - JSON_CPACK(val, "{ss,ss,ss,ss,ss,sI,sI}", - "hashrate1m", suffix1, - "hashrate5m", suffix5, - "hashrate1hr", suffix60, - "hashrate1d", suffix1440, - "hashrate7d", suffix10080, - "shares", user->shares, - "authorised", user->auth_time); - return val; -} - -/* Adjust workinfo id to virtual value for remote trusted workinfos */ -static void remap_workinfo_id(sdata_t *sdata, json_t *val, const int64_t client_id) -{ - int64_t mapped_id, id; - workbase_t *wb; - - json_get_int64(&id, val, "workinfoid"); - - ck_rlock(&sdata->workbase_lock); - wb = __find_remote_workbase(sdata, id, client_id); - if (likely(wb)) - mapped_id = wb->mapped_id; - else - mapped_id = id; - ck_runlock(&sdata->workbase_lock); - - /* Replace value with mapped id */ - json_set_int64(val, "workinfoid", 
mapped_id); -} - -static void block_share_summary(sdata_t *sdata) -{ - double bdiff, sdiff; - - if (unlikely(!sdata->current_workbase || !sdata->current_workbase->network_diff)) - return; - - sdiff = sdata->stats.accounted_diff_shares; - bdiff = sdiff / sdata->current_workbase->network_diff * 100; - LOGWARNING("Block solved after %.0lf shares at %.1f%% diff", - sdiff, bdiff); -} - -static void block_solve(ckpool_t *ckp, json_t *val) -{ - char *msg, *workername = NULL; - sdata_t *sdata = ckp->sdata; - char cdfield[64]; - double diff = 0; - int height = 0; - ts_t ts_now; - - ts_realtime(&ts_now); - sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); - - json_set_string(val, "confirmed", "1"); - json_set_string(val, "createdate", cdfield); - json_set_string(val, "createcode", __func__); - json_get_int(&height, val, "height"); - json_get_double(&diff, val, "diff"); - json_get_string(&workername, val, "workername"); - - if (!workername) { - ASPRINTF(&msg, "Block solved by %s!", ckp->name); - LOGWARNING("Solved and confirmed block!"); - } else { - json_t *user_val, *worker_val; - worker_instance_t *worker; - user_instance_t *user; - char *s; - - ASPRINTF(&msg, "Block %d solved by %s @ %s!", height, workername, ckp->name); - LOGWARNING("Solved and confirmed block %d by %s", height, workername); - user = user_by_workername(sdata, workername); - worker = get_worker(sdata, user, workername); - - ck_rlock(&sdata->instance_lock); - user_val = user_stats(user); - worker_val = worker_stats(worker); - ck_runlock(&sdata->instance_lock); - - s = json_dumps(user_val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(user_val); - LOGWARNING("User %s:%s", user->username, s); - dealloc(s); - s = json_dumps(worker_val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(worker_val); - LOGWARNING("Worker %s:%s", workername, s); - dealloc(s); - } - stratum_broadcast_message(sdata, msg); - free(msg); - - free(workername); - - block_share_summary(sdata); - reset_bestshares(sdata); 
-} - -static void block_reject(json_t *val) -{ - int height = 0; - - json_get_int(&height, val, "height"); - - LOGWARNING("Submitted, but had block %d rejected", height); -} - -/* Some upstream pools (like p2pool) don't update stratum often enough and - * miners disconnect if they don't receive regular communication so send them - * a ping at regular intervals */ -static void broadcast_ping(sdata_t *sdata) -{ - json_t *json_msg; - - JSON_CPACK(json_msg, "{s:[],s:i,s:s}", - "params", - "id", 42, - "method", "mining.ping"); - - stratum_broadcast(sdata, json_msg, SM_PING); -} - -static void ckmsgq_stats(ckmsgq_t *ckmsgq, const int size, json_t **val) -{ - int64_t memsize, generated; - ckmsg_t *msg; - int objects; - - mutex_lock(ckmsgq->lock); - DL_COUNT(ckmsgq->msgs, msg, objects); - generated = ckmsgq->messages; - mutex_unlock(ckmsgq->lock); - - memsize = (sizeof(ckmsg_t) + size) * objects; - JSON_CPACK(*val, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); -} - -char *stratifier_stats(ckpool_t *ckp, void *data) -{ - json_t *val = json_object(), *subval; - int64_t memsize, generated; - sdata_t *sdata = data; - int objects; - char *buf; - - ck_rlock(&sdata->workbase_lock); - objects = HASH_COUNT(sdata->workbases); - memsize = SAFE_HASH_OVERHEAD(sdata->workbases) + sizeof(workbase_t) * objects; - generated = sdata->workbases_generated; - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "workbases", subval); - objects = HASH_COUNT(sdata->remote_workbases); - memsize = SAFE_HASH_OVERHEAD(sdata->remote_workbases) + sizeof(workbase_t) * objects; - ck_runlock(&sdata->workbase_lock); - - JSON_CPACK(subval, "{si,si}", "count", objects, "memory", memsize); - json_set_object(val, "remote_workbases", subval); - - ck_rlock(&sdata->instance_lock); - if (ckp->btcsolo) { - user_instance_t *user, *tmpuser; - int subobjects; - - objects = 0; - memsize = 0; - HASH_ITER(hh, 
sdata->user_instances, user, tmpuser) { - subobjects = HASH_COUNT(user->userwbs); - objects += subobjects; - memsize += SAFE_HASH_OVERHEAD(user->userwbs) + sizeof(struct userwb) * subobjects; - } - generated = sdata->userwbs_generated; - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "userwbs", subval); - } - - objects = HASH_COUNT(sdata->user_instances); - memsize = SAFE_HASH_OVERHEAD(sdata->user_instances) + sizeof(stratum_instance_t) * objects; - JSON_CPACK(subval, "{si,si}", "count", objects, "memory", memsize); - json_set_object(val, "users", subval); - - objects = HASH_COUNT(sdata->stratum_instances); - memsize = SAFE_HASH_OVERHEAD(sdata->stratum_instances); - generated = sdata->stratum_generated; - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "clients", subval); - - objects = sdata->stats.disconnected; - generated = sdata->disconnected_generated; - memsize = SAFE_HASH_OVERHEAD(sdata->disconnected_sessions); - memsize += sizeof(session_t) * sdata->stats.disconnected; - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "disconnected", subval); - ck_runlock(&sdata->instance_lock); - - mutex_lock(&sdata->share_lock); - generated = sdata->shares_generated; - objects = HASH_COUNT(sdata->shares); - memsize = SAFE_HASH_OVERHEAD(sdata->shares) + sizeof(share_t) * objects; - mutex_unlock(&sdata->share_lock); - - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "shares", subval); - - ck_rlock(&sdata->txn_lock); - objects = HASH_COUNT(sdata->txns); - memsize = SAFE_HASH_OVERHEAD(sdata->txns) + sizeof(txntable_t) * objects; - generated = sdata->txns_generated; - JSON_CPACK(subval, "{si,si,sI}", "count", objects, "memory", memsize, "generated", generated); - json_set_object(val, "transactions", 
subval); - ck_runlock(&sdata->txn_lock); - - ckmsgq_stats(sdata->ssends, sizeof(smsg_t), &subval); - json_set_object(val, "ssends", subval); - /* Don't know exactly how big the string is so just count the pointer for now */ - ckmsgq_stats(sdata->srecvs, sizeof(char *), &subval); - json_set_object(val, "srecvs", subval); - ckmsgq_stats(sdata->stxnq, sizeof(json_params_t), &subval); - json_set_object(val, "stxnq", subval); - - buf = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - LOGNOTICE("Stratifier stats: %s", buf); - return buf; -} - -/* Send a single client a reconnect request, setting the time we sent the - * request so we can drop the client lazily if it hasn't reconnected on its - * own more than one minute later if we call reconnect again */ -static void reconnect_client(sdata_t *sdata, stratum_instance_t *client) -{ - json_t *json_msg; - - /* Already requested? */ - if (client->reconnect_request) { - if (time(NULL) - client->reconnect_request >= 60) - connector_drop_client(sdata->ckp, client->id); - return; - } - client->reconnect_request = time(NULL); - JSON_CPACK(json_msg, "{sosss[]}", "id", json_null(), "method", "client.reconnect", - "params"); - stratum_add_send(sdata, json_msg, client->id, SM_RECONNECT); -} - -static void dead_proxy(ckpool_t *ckp, sdata_t *sdata, const char *buf) -{ - int id = 0, subid = 0; - - sscanf(buf, "deadproxy=%d:%d", &id, &subid); - dead_proxyid(sdata, id, subid, false, false); - reap_proxies(ckp, sdata); -} - -static void del_proxy(ckpool_t *ckp, sdata_t *sdata, const char *buf) -{ - int id = 0, subid = 0; - - sscanf(buf, "delproxy=%d:%d", &id, &subid); - dead_proxyid(sdata, id, subid, false, true); - reap_proxies(ckp, sdata); -} - -static void reconnect_client_id(sdata_t *sdata, const int64_t client_id) -{ - stratum_instance_t *client; - - client = ref_instance_by_id(sdata, client_id); - if (!client) { - LOGINFO("reconnect_client_id failed to find client %"PRId64, client_id); - return; - } - 
client->reconnect = true; - reconnect_client(sdata, client); - dec_instance_ref(sdata, client); -} - -/* API commands */ - -static json_t *userinfo(const user_instance_t *user) -{ - json_t *val; - - JSON_CPACK(val, "{ss,si,si,sf,sf,sf,sf,sf,sf,si}", - "user", user->username, "id", user->id, "workers", user->workers, - "bestdiff", user->best_diff, "dsps1", user->dsps1, "dsps5", user->dsps5, - "dsps60", user->dsps60, "dsps1440", user->dsps1440, "dsps10080", user->dsps10080, - "lastshare", user->last_share.tv_sec); - return val; -} - -static void getuser(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL; - char *username = NULL; - user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&username, val, "user")) { - res = json_errormsg("Failed to find user key"); - goto out; - } - if (!strlen(username)) { - res = json_errormsg("Zero length user key"); - goto out; - } - user = get_user(sdata, username); - res = userinfo(user); -out: - if (val) - json_decref(val); - free(username); - send_api_response(res, *sockd); -} - -static void userclients(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL, *client_arr; - stratum_instance_t *client; - char *username = NULL; - user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&username, val, "user")) { - res = json_errormsg("Failed to find user key"); - goto out; - } - if (!strlen(username)) { - res = json_errormsg("Zero length user key"); - goto out; - } - user = get_user(sdata, username); - client_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(user->clients, client, user_next) { - json_array_append_new(client_arr, json_integer(client->id)); - } - 
ck_runlock(&sdata->instance_lock); - - JSON_CPACK(res, "{ss,so}", "user", username, "clients", client_arr); -out: - if (val) - json_decref(val); - free(username); - send_api_response(res, *sockd); -} - -static void workerclients(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL, *client_arr; - char *tmp, *username, *workername = NULL; - stratum_instance_t *client; - user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&workername, val, "worker")) { - res = json_errormsg("Failed to find worker key"); - goto out; - } - if (!strlen(workername)) { - res = json_errormsg("Zero length worker key"); - goto out; - } - tmp = strdupa(workername); - username = strsep(&tmp, "._"); - user = get_user(sdata, username); - client_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(user->clients, client, user_next) { - if (strcmp(client->workername, workername)) - continue; - json_array_append_new(client_arr, json_integer(client->id)); - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(res, "{ss,so}", "worker", workername, "clients", client_arr); -out: - if (val) - json_decref(val); - free(workername); - send_api_response(res, *sockd); -} - -static json_t *workerinfo(const user_instance_t *user, const worker_instance_t *worker) -{ - json_t *val; - - JSON_CPACK(val, "{ss,ss,si,sf,sf,sf,sf,si,sf,si,sb}", - "user", user->username, "worker", worker->workername, "id", user->id, - "dsps1", worker->dsps1, "dsps5", worker->dsps5, "dsps60", worker->dsps60, - "dsps1440", worker->dsps1440, "lastshare", worker->last_share.tv_sec, - "bestdiff", worker->best_diff, "mindiff", worker->mindiff, "idle", worker->idle); - return val; -} - -static void getworker(sdata_t *sdata, const char *buf, int *sockd) -{ - char *tmp, *username, *workername = NULL; - json_t *val = NULL, *res = NULL; - worker_instance_t *worker; 
- user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&workername, val, "worker")) { - res = json_errormsg("Failed to find worker key"); - goto out; - } - if (!strlen(workername)) { - res = json_errormsg("Zero length worker key"); - goto out; - } - tmp = strdupa(workername); - username = strsep(&tmp, "._"); - user = get_user(sdata, username); - worker = get_worker(sdata, user, workername); - res = workerinfo(user, worker); -out: - if (val) - json_decref(val); - free(workername); - send_api_response(res, *sockd); -} - -static void getworkers(sdata_t *sdata, int *sockd) -{ - json_t *val = NULL, *worker_arr; - worker_instance_t *worker; - user_instance_t *user; - - worker_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - for (user = sdata->user_instances; user; user = user->hh.next) { - DL_FOREACH(user->worker_instances, worker) { - json_array_append_new(worker_arr, workerinfo(user, worker)); - } - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(val, "{so}", "workers", worker_arr); - send_api_response(val, *sockd); -} - -static void getusers(sdata_t *sdata, int *sockd) -{ - json_t *val = NULL, *user_array; - user_instance_t *user; - - user_array = json_array(); - - ck_rlock(&sdata->instance_lock); - for (user = sdata->user_instances; user; user = user->hh.next) { - json_array_append_new(user_array, userinfo(user)); - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(val, "{so}", "users", user_array); - send_api_response(val, *sockd); -} - -static json_t *clientinfo(const stratum_instance_t *client) -{ - json_t *val = json_object(); - - /* Too many fields for a pack object, do each discretely to keep track */ - json_set_int(val, "id", client->id); - json_set_string(val, "enonce1", client->enonce1); - json_set_string(val, "enonce1var", client->enonce1var); - json_set_int(val, "enonce1_64", client->enonce1_64); - 
json_set_double(val, "diff", client->diff); - json_set_double(val, "dsps1", client->dsps1); - json_set_double(val, "dsps5", client->dsps5); - json_set_double(val, "dsps60", client->dsps60); - json_set_double(val, "dsps1440", client->dsps1440); - json_set_double(val, "dsps10080", client->dsps10080); - json_set_int(val, "lastshare", client->last_share.tv_sec); - json_set_int(val, "starttime", client->start_time); - json_set_string(val, "address", client->address); - json_set_bool(val, "subscribed", client->subscribed); - json_set_bool(val, "authorised", client->authorised); - json_set_bool(val, "idle", client->idle); - json_set_string(val, "useragent", client->useragent ? client->useragent : ""); - json_set_string(val, "workername", client->workername ? client->workername : ""); - json_set_int(val, "userid", client->user_id); - json_set_int(val, "server", client->server); - json_set_double(val, "bestdiff", client->best_diff); - json_set_int(val, "proxyid", client->proxyid); - json_set_int(val, "subproxyid", client->subproxyid); - - return val; -} - -static void getclient(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL; - stratum_instance_t *client; - json_error_t err_val; - int64_t client_id; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_int64(&client_id, val, "id")) { - res = json_errormsg("Failed to find id key"); - goto out; - } - client = ref_instance_by_id(sdata, client_id); - if (!client) { - res = json_errormsg("Failed to find client %"PRId64, client_id); - goto out; - } - res = clientinfo(client); - - dec_instance_ref(sdata, client); -out: - if (val) - json_decref(val); - send_api_response(res, *sockd); -} - -static void getclients(sdata_t *sdata, int *sockd) -{ - json_t *val = NULL, *client_arr; - stratum_instance_t *client; - - client_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - for (client = sdata->stratum_instances; 
client; client = client->hh.next) { - json_array_append_new(client_arr, clientinfo(client)); - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(val, "{so}", "clients", client_arr); - send_api_response(val, *sockd); -} - -static void user_clientinfo(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL, *client_arr; - stratum_instance_t *client; - char *username = NULL; - user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&username, val, "user")) { - res = json_errormsg("Failed to find user key"); - goto out; - } - if (!strlen(username)) { - res = json_errormsg("Zero length user key"); - goto out; - } - user = get_user(sdata, username); - client_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(user->clients, client, user_next) { - json_array_append_new(client_arr, clientinfo(client)); - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(res, "{ss,so}", "user", username, "clients", client_arr); -out: - if (val) - json_decref(val); - free(username); - send_api_response(res, *sockd); -} - -static void worker_clientinfo(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL, *client_arr; - char *tmp, *username, *workername = NULL; - stratum_instance_t *client; - user_instance_t *user; - json_error_t err_val; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_string(&workername, val, "worker")) { - res = json_errormsg("Failed to find worker key"); - goto out; - } - if (!strlen(workername)) { - res = json_errormsg("Zero length worker key"); - goto out; - } - tmp = strdupa(workername); - username = strsep(&tmp, "._"); - user = get_user(sdata, username); - client_arr = json_array(); - - ck_rlock(&sdata->instance_lock); - DL_FOREACH2(user->clients, client, user_next) { - 
if (strcmp(client->workername, workername)) - continue; - json_array_append_new(client_arr, clientinfo(client)); - } - ck_runlock(&sdata->instance_lock); - - JSON_CPACK(res, "{ss,so}", "worker", workername, "clients", client_arr); -out: - if (val) - json_decref(val); - free(workername); - send_api_response(res, *sockd); -} - -/* Return the user masked priority value of the proxy */ -static int proxy_prio(const proxy_t *proxy) -{ - int prio = proxy->priority & 0x00000000ffffffff; - - return prio; -} - -static json_t *json_proxyinfo(const proxy_t *proxy) -{ - const proxy_t *parent = proxy->parent; - json_t *val; - - JSON_CPACK(val, "{si,si,si,sf,ss,ss,ss,ss,ss,si,si,si,si,sb,sb,sI,sI,sI,sI,sI,si,sb,sb,si}", - "id", proxy->id, "subid", proxy->subid, "priority", proxy_prio(parent), - "diff", proxy->diff, "baseurl", proxy->baseurl, "url", proxy->url, - "auth", proxy->auth, "pass", proxy->pass, - "enonce1", proxy->enonce1, "enonce1constlen", proxy->enonce1constlen, - "enonce1varlen", proxy->enonce1varlen, "nonce2len", proxy->nonce2len, - "enonce2varlen", proxy->enonce2varlen, "subscribed", proxy->subscribed, - "notified", proxy->notified, "clients", proxy->clients, "maxclients", proxy->max_clients, - "bound_clients", proxy->bound_clients, "combined_clients", parent->combined_clients, - "headroom", proxy->headroom, "subproxy_count", parent->subproxy_count, - "dead", proxy->dead, "global", proxy->global, "userid", proxy->userid); - return val; -} - -static void getproxy(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL; - json_error_t err_val; - int id, subid = 0; - proxy_t *proxy; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_int(&id, val, "id")) { - res = json_errormsg("Failed to find id key"); - goto out; - } - json_get_int(&subid, val, "subid"); - if (!subid) - proxy = existing_proxy(sdata, id); - else - proxy = existing_subproxy(sdata, id, subid); 
- if (!proxy) { - res = json_errormsg("Failed to find proxy %d:%d", id, subid); - goto out; - } - res = json_proxyinfo(proxy); -out: - if (val) - json_decref(val); - send_api_response(res, *sockd); -} - -static void proxyinfo(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL, *arr_val = json_array(); - proxy_t *proxy, *subproxy; - bool all = true; - int userid = 0; - - if (buf) { - /* See if there's a userid specified */ - val = json_loads(buf, 0, NULL); - if (json_get_int(&userid, val, "userid")) - all = false; - } - - mutex_lock(&sdata->proxy_lock); - for (proxy = sdata->proxies; proxy; proxy = proxy->hh.next) { - if (!all && proxy->userid != userid) - continue; - for (subproxy = proxy->subproxies; subproxy; subproxy = subproxy->sh.next) - json_array_append_new(arr_val, json_proxyinfo(subproxy)); - } - mutex_unlock(&sdata->proxy_lock); - - if (val) - json_decref(val); - JSON_CPACK(res, "{so}", "proxies", arr_val); - send_api_response(res, *sockd); -} - -static void setproxy(sdata_t *sdata, const char *buf, int *sockd) -{ - json_t *val = NULL, *res = NULL; - json_error_t err_val; - int id, priority; - proxy_t *proxy; - - val = json_loads(buf, 0, &err_val); - if (unlikely(!val)) { - res = json_encode_errormsg(&err_val); - goto out; - } - if (!json_get_int(&id, val, "id")) { - res = json_errormsg("Failed to find id key"); - goto out; - } - if (!json_get_int(&priority, val, "priority")) { - res = json_errormsg("Failed to find priority key"); - goto out; - } - proxy = existing_proxy(sdata, id); - if (!proxy) { - res = json_errormsg("Failed to find proxy %d", id); - goto out; - } - if (priority != proxy_prio(proxy)) - set_proxy_prio(sdata, proxy, priority); - res = json_proxyinfo(proxy); -out: - if (val) - json_decref(val); - send_api_response(res, *sockd); -} - -static void get_poolstats(sdata_t *sdata, int *sockd) -{ - pool_stats_t *stats = &sdata->stats; - json_t *val; - - mutex_lock(&sdata->stats_lock); - JSON_CPACK(val, 
"{si,si,si,si,si,sI,sf,sf,sf,sf,sI,sI,sf,sf,sf,sf,sf,sf,sf}", - "start", stats->start_time.tv_sec, "update", stats->last_update.tv_sec, - "workers", stats->workers + stats->remote_workers, "users", stats->users + stats->remote_users, - "disconnected", stats->disconnected, - "shares", stats->accounted_shares, "sps1", stats->sps1, "sps5", stats->sps5, - "sps15", stats->sps15, "sps60", stats->sps60, "accepted", stats->accounted_diff_shares, - "rejected", stats->accounted_rejects, "dsps1", stats->dsps1, "dsps5", stats->dsps5, - "dsps15", stats->dsps15, "dsps60", stats->dsps60, "dsps360", stats->dsps360, - "dsps1440", stats->dsps1440, "dsps10080", stats->dsps10080); - mutex_unlock(&sdata->stats_lock); - - send_api_response(val, *sockd); -} - -static void get_uptime(sdata_t *sdata, int *sockd) -{ - int uptime = time(NULL) - sdata->stats.start_time.tv_sec; - json_t *val; - - JSON_CPACK(val, "{si}", "uptime", uptime); - send_api_response(val, *sockd); -} - -static void stratum_loop(ckpool_t *ckp, proc_instance_t *pi) -{ - sdata_t *sdata = ckp->sdata; - unix_msg_t *umsg = NULL; - int ret = 0; - char *buf; - -retry: - if (umsg) { - Close(umsg->sockd); - free(umsg->buf); - dealloc(umsg); - } - - do { - time_t end_t; - - end_t = time(NULL); - if (end_t - sdata->update_time >= ckp->update_interval) { - sdata->update_time = end_t; - if (!ckp->proxy) { - LOGDEBUG("%ds elapsed in strat_loop, updating gbt base", - ckp->update_interval); - update_base(sdata, GEN_NORMAL); - } else if (!ckp->passthrough) { - LOGDEBUG("%ds elapsed in strat_loop, pinging miners", - ckp->update_interval); - broadcast_ping(sdata); - } - } - - umsg = get_unix_msg(pi); - } while (!umsg); - - buf = umsg->buf; - if (buf[0] == '{') { - json_t *val = json_loads(buf, JSON_DISABLE_EOF_CHECK, NULL); - - /* This is a message for a node */ - if (likely(val)) - ckmsgq_add(sdata->srecvs, val); - goto retry; - } - if (cmdmatch(buf, "ping")) { - LOGDEBUG("Stratifier received ping request"); - send_unix_msg(umsg->sockd, 
"pong"); - goto retry; - } - if (cmdmatch(buf, "stats")) { - char *msg; - - LOGDEBUG("Stratifier received stats request"); - msg = stratifier_stats(ckp, sdata); - send_unix_msg(umsg->sockd, msg); - goto retry; - } - /* Parse API commands here to return a message to sockd */ - if (cmdmatch(buf, "clients")) { - getclients(sdata, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "workers")) { - getworkers(sdata, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "users")) { - getusers(sdata, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "getclient")) { - getclient(sdata, buf + 10, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "getuser")) { - getuser(sdata, buf + 8, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "getworker")) { - getworker(sdata, buf + 10, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "userclients")) { - userclients(sdata, buf + 12, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "workerclients")) { - workerclients(sdata, buf + 14, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "getproxy")) { - getproxy(sdata, buf + 9, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "setproxy")) { - setproxy(sdata, buf + 9, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "poolstats")) { - get_poolstats(sdata, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "proxyinfo")) { - proxyinfo(sdata, buf + 10, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "ucinfo")) { - user_clientinfo(sdata, buf + 7, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf,"uptime")) { - get_uptime(sdata, &umsg->sockd); - goto retry; - } - if (cmdmatch(buf, "wcinfo")) { - worker_clientinfo(sdata, buf + 7, &umsg->sockd); - goto retry; - } - - LOGDEBUG("Stratifier received request: %s", buf); - if (cmdmatch(buf, "update")) { - update_base(sdata, GEN_PRIORITY); - } else if (cmdmatch(buf, "subscribe")) { - /* Proxifier has a new subscription */ - update_subscribe(ckp, buf); - } else if (cmdmatch(buf, "notify")) { - /* Proxifier has a new 
notify ready */ - update_notify(ckp, buf); - } else if (cmdmatch(buf, "diff")) { - update_diff(ckp, buf); - } else if (cmdmatch(buf, "dropclient")) { - int64_t client_id; - - ret = sscanf(buf, "dropclient=%"PRId64, &client_id); - if (ret < 0) - LOGDEBUG("Stratifier failed to parse dropclient command: %s", buf); - else - drop_client(ckp, sdata, client_id); - } else if (cmdmatch(buf, "reconnclient")) { - int64_t client_id; - - ret = sscanf(buf, "reconnclient=%"PRId64, &client_id); - if (ret < 0) - LOGWARNING("Stratifier failed to parse reconnclient command: %s", buf); - else - reconnect_client_id(sdata, client_id); - } else if (cmdmatch(buf, "dropall")) { - drop_allclients(ckp); - } else if (cmdmatch(buf, "reconnect")) { - request_reconnect(sdata, buf); - } else if (cmdmatch(buf, "deadproxy")) { - dead_proxy(ckp, sdata, buf); - } else if (cmdmatch(buf, "delproxy")) { - del_proxy(ckp, sdata, buf); - } else if (cmdmatch(buf, "loglevel")) { - sscanf(buf, "loglevel=%d", &ckp->loglevel); - } else if (cmdmatch(buf, "resetshares")) { - reset_bestshares(sdata); - } else - LOGWARNING("Unhandled stratifier message: %s", buf); - goto retry; -} - -static void *blockupdate(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - sdata_t *sdata = ckp->sdata; - char hash[68]; - - pthread_detach(pthread_self()); - rename_proc("blockupdate"); - - while (42) { - int ret; - - ret = generator_getbest(ckp, hash); - switch (ret) { - case GETBEST_NOTIFY: - cksleep_ms(5000); - break; - case GETBEST_SUCCESS: - if (strcmp(hash, sdata->lastswaphash)) { - update_base(sdata, GEN_PRIORITY); - break; - } - [[fallthrough]]; - case GETBEST_FAILED: - default: - cksleep_ms(ckp->blockpoll); - } - } - return NULL; -} - -/* Enter holding workbase_lock and client a ref count. 
*/ -static void __fill_enonce1data(const workbase_t *wb, stratum_instance_t *client) -{ - if (wb->enonce1constlen) - memcpy(client->enonce1bin, wb->enonce1constbin, wb->enonce1constlen); - if (wb->enonce1varlen) { - memcpy(client->enonce1bin + wb->enonce1constlen, &client->enonce1_64, wb->enonce1varlen); - __bin2hex(client->enonce1var, &client->enonce1_64, wb->enonce1varlen); - } - __bin2hex(client->enonce1, client->enonce1bin, wb->enonce1constlen + wb->enonce1varlen); -} - -/* Create a new enonce1 from the 64 bit enonce1_64 value, using only the number - * of bytes we have to work with when we are proxying with a split nonce2. - * When the proxy space is less than 32 bits to work with, we look for an - * unused enonce1 value and reject clients instead if there is no space left. - * Needs to be entered with client holding a ref count. */ -static bool new_enonce1(ckpool_t *ckp, sdata_t *ckp_sdata, sdata_t *sdata, stratum_instance_t *client) -{ - proxy_t *proxy = NULL; - uint64_t enonce1; - - if (ckp->proxy) { - if (!ckp_sdata->proxy) - return false; - - mutex_lock(&ckp_sdata->proxy_lock); - proxy = sdata->subproxy; - client->proxyid = proxy->id; - client->subproxyid = proxy->subid; - mutex_unlock(&ckp_sdata->proxy_lock); - - if (proxy->clients >= proxy->max_clients) { - LOGWARNING("Proxy reached max clients %"PRId64, proxy->max_clients); - return false; - } - } - - /* Still initialising */ - if (unlikely(!sdata->current_workbase)) - return false; - - /* instance_lock protects enonce1_64. 
Incrementing a little endian 64bit - * number ensures that no matter how many of the bits we take from the - * left depending on nonce2 length, we'll always get a changing value - * for every next client.*/ - ck_wlock(&ckp_sdata->instance_lock); - enonce1 = le64toh(ckp_sdata->enonce1_64); - enonce1++; - client->enonce1_64 = ckp_sdata->enonce1_64 = htole64(enonce1); - if (proxy) { - client->proxy = proxy; - proxy->clients++; - proxy->bound_clients++; - proxy->parent->combined_clients++; - } - ck_wunlock(&ckp_sdata->instance_lock); - - ck_rlock(&sdata->workbase_lock); - __fill_enonce1data(sdata->current_workbase, client); - ck_runlock(&sdata->workbase_lock); - - return true; -} - -static void stratum_send_message(sdata_t *sdata, const stratum_instance_t *client, const char *msg); - -/* Need to hold sdata->proxy_lock */ -static proxy_t *__best_subproxy(proxy_t *proxy) -{ - proxy_t *subproxy, *best = NULL, *tmp; - int64_t max_headroom; - - proxy->headroom = max_headroom = 0; - HASH_ITER(sh, proxy->subproxies, subproxy, tmp) { - int64_t subproxy_headroom; - - if (subproxy->dead) - continue; - if (!subproxy->sdata->current_workbase) - continue; - subproxy_headroom = subproxy->max_clients - subproxy->clients; - - proxy->headroom += subproxy_headroom; - if (subproxy_headroom > max_headroom) { - best = subproxy; - max_headroom = subproxy_headroom; - } - if (best) - break; - } - return best; -} - -/* Choose the stratifier data for a new client. Use the main ckp_sdata except - * in proxy mode where we find a subproxy based on the current proxy with room - * for more clients. Signal the generator to recruit more subproxies if we are - * running out of room. 
*/ -static sdata_t *select_sdata(ckpool_t *ckp, sdata_t *ckp_sdata, const int userid) -{ - proxy_t *global, *proxy, *tmp, *best = NULL; - - if (!ckp->proxy || ckp->passthrough) - return ckp_sdata; - - /* Proxies are ordered by priority so first available will be the best - * priority */ - mutex_lock(&ckp_sdata->proxy_lock); - best = global = ckp_sdata->proxy; - - HASH_ITER(hh, ckp_sdata->proxies, proxy, tmp) { - if (proxy->userid < userid) - continue; - if (proxy->userid > userid) - break; - best = __best_subproxy(proxy); - if (best) - break; - } - mutex_unlock(&ckp_sdata->proxy_lock); - - if (!best) { - if (!userid) - LOGWARNING("Temporarily insufficient proxies to accept more clients"); - else - LOGNOTICE("Temporarily insufficient proxies for userid %d to accept more clients", userid); - return NULL; - } - if (!userid) { - if (best->id != global->id || current_headroom(ckp_sdata, &proxy) < 2) - generator_recruit(ckp, global->id, 1); - } else { - if (best_userproxy_headroom(ckp_sdata, userid) < 2) - generator_recruit(ckp, best->id, 1); - } - return best->sdata; -} - -static int int_from_sessionid(const char *sessionid) -{ - int ret = 0, slen; - - if (!sessionid) - goto out; - slen = strlen(sessionid) / 2; - if (slen < 1 || slen > 4) - goto out; - - if (!validhex(sessionid)) - goto out; - - sscanf(sessionid, "%x", &ret); -out: - return ret; -} - -static int userid_from_sessionid(sdata_t *sdata, const int session_id) -{ - session_t *session; - int ret = -1; - - ck_wlock(&sdata->instance_lock); - HASH_FIND_INT(sdata->disconnected_sessions, &session_id, session); - if (!session) - goto out_unlock; - HASH_DEL(sdata->disconnected_sessions, session); - sdata->stats.disconnected--; - ret = session->userid; - dealloc(session); -out_unlock: - ck_wunlock(&sdata->instance_lock); - - if (ret != -1) - LOGINFO("Found old session id %d for userid %d", session_id, ret); - return ret; -} - -static int userid_from_sessionip(sdata_t *sdata, const char *address) -{ - session_t 
*session, *tmp; - int ret = -1; - - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->disconnected_sessions, session, tmp) { - if (!strcmp(session->address, address)) { - ret = session->userid; - break; - } - } - if (ret == -1) - goto out_unlock; - HASH_DEL(sdata->disconnected_sessions, session); - sdata->stats.disconnected--; - dealloc(session); -out_unlock: - ck_wunlock(&sdata->instance_lock); - - if (ret != -1) - LOGINFO("Found old session address %s for userid %d", address, ret); - return ret; -} - -/* Extranonce1 must be set here. Needs to be entered with client holding a ref - * count. */ -static json_t *parse_subscribe(stratum_instance_t *client, const int64_t client_id, const json_t *params_val) -{ - ckpool_t *ckp = client->ckp; - sdata_t *sdata, *ckp_sdata = ckp->sdata; - int session_id = 0, userid = -1; - bool old_match = false; - char sessionid[12]; - int arr_size; - json_t *ret; - int n2len; - - if (unlikely(!json_is_array(params_val))) { - stratum_send_message(ckp_sdata, client, "Invalid json: params not an array"); - return json_string("params not an array"); - } - - sdata = select_sdata(ckp, ckp_sdata, 0); - if (unlikely(!ckp->node && (!sdata || !sdata->current_workbase))) { - LOGWARNING("Failed to provide subscription due to no %s", sdata ? "current workbase" : "sdata"); - stratum_send_message(ckp_sdata, client, "Pool Initialising"); - return json_string("Initialising"); - } - - arr_size = json_array_size(params_val); - /* NOTE useragent is NULL prior to this so should not be used in code - * till after this point */ - if (arr_size > 0) { - const char *buf; - - buf = json_string_value(json_array_get(params_val, 0)); - if (buf && strlen(buf)) - client->useragent = strdup(buf); - else - client->useragent = ckzalloc(1); // Set to "" - if (arr_size > 1) { - /* This would be the session id for reconnect, it will - * not work for clients on a proxied connection. 
*/ - buf = json_string_value(json_array_get(params_val, 1)); - session_id = int_from_sessionid(buf); - LOGDEBUG("Found old session id %d", session_id); - } - if (!ckp->proxy && session_id && !subclient(client_id)) { - if ((client->enonce1_64 = disconnected_sessionid_exists(sdata, session_id, client_id))) { - sprintf(client->enonce1, "%016lx", client->enonce1_64); - old_match = true; - - ck_rlock(&ckp_sdata->workbase_lock); - __fill_enonce1data(sdata->current_workbase, client); - ck_runlock(&ckp_sdata->workbase_lock); - } - } - } else - client->useragent = ckzalloc(1); - - /* Whitelist cgminer based clients to receive stratum messages */ - if (strcasestr(client->useragent, "gminer")) - client->messages = true; - - /* We got what we needed */ - if (ckp->node) - return NULL; - - if (ckp->proxy) { - /* Use the session_id to tell us which user this was. - * If it's not available, see if there's an IP address - * which matches a recently disconnected session. */ - if (session_id) - userid = userid_from_sessionid(ckp_sdata, session_id); - if (userid == -1) - userid = userid_from_sessionip(ckp_sdata, client->address); - if (userid != -1) { - sdata_t *user_sdata = select_sdata(ckp, ckp_sdata, userid); - - if (user_sdata) - sdata = user_sdata; - } - } - - client->sdata = sdata; - if (ckp->proxy) { - LOGINFO("Current %d, selecting proxy %d:%d for client %s", ckp_sdata->proxy->id, - sdata->subproxy->id, sdata->subproxy->subid, client->identity); - } - - if (!old_match) { - /* Create a new extranonce1 based on a uint64_t pointer */ - if (!new_enonce1(ckp, ckp_sdata, sdata, client)) { - stratum_send_message(sdata, client, "Pool full of clients"); - client->reject = 3; - return json_string("proxy full"); - } - LOGINFO("Set new subscription %s to new enonce1 %lx string %s", client->identity, - client->enonce1_64, client->enonce1); - } else { - LOGINFO("Set new subscription %s to old matched enonce1 %lx string %s", - client->identity, client->enonce1_64, client->enonce1); - } - - 
/* Workbases will exist if sdata->current_workbase is not NULL */ - ck_rlock(&sdata->workbase_lock); - n2len = sdata->workbases->enonce2varlen; - sprintf(sessionid, "%08x", client->session_id); - JSON_CPACK(ret, "[[[s,s]],s,i]", "mining.notify", sessionid, client->enonce1, - n2len); - ck_runlock(&sdata->workbase_lock); - - client->subscribed = true; - - return ret; -} - -static double dsps_from_key(json_t *val, const char *key) -{ - char *string, *endptr; - double ret = 0; - - json_get_string(&string, val, key); - if (!string) - return ret; - ret = strtod(string, &endptr) / nonces; - if (endptr) { - switch (endptr[0]) { - case 'E': - ret *= (double)1000; - [[fallthrough]]; - case 'P': - ret *= (double)1000; - [[fallthrough]]; - case 'T': - ret *= (double)1000; - [[fallthrough]]; - case 'G': - ret *= (double)1000; - [[fallthrough]]; - case 'M': - ret *= (double)1000; - [[fallthrough]]; - case 'K': - ret *= (double)1000; - [[fallthrough]]; - default: - break; - } - } - free(string); - return ret; -} - -static void decay_client(stratum_instance_t *client, double diff, tv_t *now_t) -{ - double tdiff = sane_tdiff(now_t, &client->last_decay); - - /* If we're calling the hashmeter too frequently we'll just end up - * racing and having inappropriate values, so store up diff and update - * at most 20 times per second. 
Use an integer for uadiff to make the - * update atomic */ - if (tdiff < 0.05) { - client->uadiff += diff; - return; - } - copy_tv(&client->last_decay, now_t); - diff += client->uadiff; - client->uadiff = 0; - decay_time(&client->dsps1, diff, tdiff, MIN1); - decay_time(&client->dsps5, diff, tdiff, MIN5); - decay_time(&client->dsps60, diff, tdiff, HOUR); - decay_time(&client->dsps1440, diff, tdiff, DAY); - decay_time(&client->dsps10080, diff, tdiff, WEEK); -} - -static void decay_worker(worker_instance_t *worker, double diff, tv_t *now_t) -{ - double tdiff = sane_tdiff(now_t, &worker->last_decay); - - if (tdiff < 0.05) { - worker->uadiff += diff; - return; - } - copy_tv(&worker->last_decay, now_t); - diff += worker->uadiff; - worker->uadiff = 0; - decay_time(&worker->dsps1, diff, tdiff, MIN1); - decay_time(&worker->dsps5, diff, tdiff, MIN5); - decay_time(&worker->dsps60, diff, tdiff, HOUR); - decay_time(&worker->dsps1440, diff, tdiff, DAY); - decay_time(&worker->dsps10080, diff, tdiff, WEEK); -} - -static void decay_user(user_instance_t *user, double diff, tv_t *now_t) -{ - double tdiff = sane_tdiff(now_t, &user->last_decay); - - if (tdiff < 0.05) { - user->uadiff += diff; - return; - } - copy_tv(&user->last_decay, now_t); - diff += user->uadiff; - user->uadiff = 0; - decay_time(&user->dsps1, diff, tdiff, MIN1); - decay_time(&user->dsps5, diff, tdiff, MIN5); - decay_time(&user->dsps60, diff, tdiff, HOUR); - decay_time(&user->dsps1440, diff, tdiff, DAY); - decay_time(&user->dsps10080, diff, tdiff, WEEK); -} - -static user_instance_t *get_create_user(sdata_t *sdata, const char *username, bool *new_user); -static worker_instance_t *get_create_worker(sdata_t *sdata, user_instance_t *user, - const char *workername, bool *new_worker); - -/* Load the statistics of and create all known users at startup */ -static void read_userstats(ckpool_t *ckp, sdata_t *sdata, int tvsec_diff) -{ - char dnam[256], s[4096], *username, *buf; - int ret, users = 0, workers = 0; - 
user_instance_t *user; - struct dirent *dir; - struct stat fdbuf; - bool new_user; - json_t *val; - FILE *fp; - tv_t now; - DIR *d; - int fd; - - snprintf(dnam, 255, "%susers", ckp->logdir); - d = opendir(dnam); - if (!d) { - LOGNOTICE("No user directory found"); - return; - } - - tv_time(&now); - - while ((dir = readdir(d)) != NULL) { - json_t *worker_array, *arr_val; - int64_t authorised; - int lastshare; - size_t index; - - username = basename(dir->d_name); - if (!strcmp(username, "/") || !strcmp(username, ".") || !strcmp(username, "..")) - continue; - - new_user = false; - user = get_create_user(sdata, username, &new_user); - if (unlikely(!new_user)) { - /* All users should be new at this stage */ - LOGWARNING("Duplicate user in read_userstats %s", username); - continue; - } - users++; - snprintf(s, 4095, "%s/%s", dnam, username); - fp = fopen(s, "re"); - if (unlikely(!fp)) { - /* Permission problems should be the only reason this happens */ - LOGWARNING("Failed to load user %s logfile to read", username); - continue; - } - fd = fileno(fp); - if (unlikely(fstat(fd, &fdbuf))) { - LOGERR("Failed to fstat user %s logfile", username); - fclose(fp); - continue; - } - /* We don't know how big the logfile will be so allocate - * according to file size */ - buf = ckzalloc(fdbuf.st_size + 1); - ret = fread(buf, 1, fdbuf.st_size, fp); - fclose(fp); - if (ret < 1) { - LOGNOTICE("Failed to read user %s logfile", username); - dealloc(buf); - continue; - } - val = json_loads(buf, 0, NULL); - if (!val) { - LOGNOTICE("Failed to json decode user %s logfile: %s", username, buf); - dealloc(buf); - continue; - } - dealloc(buf); - - copy_tv(&user->last_share, &now); - copy_tv(&user->last_decay, &now); - user->dsps1 = dsps_from_key(val, "hashrate1m"); - user->dsps5 = dsps_from_key(val, "hashrate5m"); - user->dsps60 = dsps_from_key(val, "hashrate1hr"); - user->dsps1440 = dsps_from_key(val, "hashrate1d"); - user->dsps10080 = dsps_from_key(val, "hashrate7d"); - json_get_int(&lastshare, 
val, "lastshare"); - user->last_share.tv_sec = lastshare; - json_get_int64(&user->shares, val, "shares"); - json_get_double(&user->best_diff, val, "bestshare"); - json_get_int64(&user->best_ever, val, "bestever"); - json_get_int64(&authorised, val, "authorised"); - user->auth_time = authorised; - if (user->best_diff > user->best_ever) - user->best_ever = user->best_diff; - LOGINFO("Successfully read user %s stats %f %f %f %f %f %f %ld %ld", user->username, - user->dsps1, user->dsps5, user->dsps60, user->dsps1440, - user->dsps10080, user->best_diff, user->best_ever, user->auth_time); - if (tvsec_diff > 60) - decay_user(user, 0, &now); - - worker_array = json_object_get(val, "worker"); - json_array_foreach(worker_array, index, arr_val) { - const char *workername = json_string_value(json_object_get(arr_val, "workername")); - worker_instance_t *worker; - bool new_worker = false; - - if (unlikely(!workername || !strlen(workername)) || - !strstr(workername, username)) { - LOGWARNING("Invalid workername in read_userstats %s", workername); - continue; - } - worker = get_create_worker(sdata, user, workername, &new_worker); - if (unlikely(!new_worker)) { - LOGWARNING("Duplicate worker in read_userstats %s", workername); - continue; - } - workers++; - copy_tv(&worker->last_decay, &now); - worker->dsps1 = dsps_from_key(arr_val, "hashrate1m"); - worker->dsps5 = dsps_from_key(arr_val, "hashrate5m"); - worker->dsps60 = dsps_from_key(arr_val, "hashrate1hr"); - worker->dsps1440 = dsps_from_key(arr_val, "hashrate1d"); - worker->dsps10080 = dsps_from_key(arr_val, "hashrate7d"); - json_get_int(&lastshare, arr_val, "lastshare"); - worker->last_share.tv_sec = lastshare; - json_get_double(&worker->best_diff, arr_val, "bestshare"); - json_get_int64(&worker->best_ever, arr_val, "bestever"); - if (worker->best_diff > worker->best_ever) - worker->best_ever = worker->best_diff; - json_get_int64(&worker->shares, arr_val, "shares"); - LOGINFO("Successfully read worker %s stats %f %f %f %f %f 
%ld", worker->workername, - worker->dsps1, worker->dsps5, worker->dsps60, worker->dsps1440, worker->best_diff, worker->best_ever); - if (tvsec_diff > 60) - decay_worker(worker, 0, &now); - } - json_decref(val); - } - closedir(d); - - if (likely(users)) - LOGWARNING("Loaded %d users and %d workers", users, workers); -} - -#define DEFAULT_AUTH_BACKOFF (3) /* Set initial backoff to 3 seconds */ - -static user_instance_t *__create_user(sdata_t *sdata, const char *username) -{ - user_instance_t *user = ckzalloc(sizeof(user_instance_t)); - - user->auth_backoff = DEFAULT_AUTH_BACKOFF; - strcpy(user->username, username); - user->id = ++sdata->user_instance_id; - HASH_ADD_STR(sdata->user_instances, username, user); - return user; -} - - -/* Find user by username or create one if it doesn't already exist */ -static user_instance_t *get_create_user(sdata_t *sdata, const char *username, bool *new_user) -{ - user_instance_t *user; - - ck_wlock(&sdata->instance_lock); - HASH_FIND_STR(sdata->user_instances, username, user); - if (unlikely(!user)) { - user = __create_user(sdata, username); - *new_user = true; - } - ck_wunlock(&sdata->instance_lock); - - return user; -} - -static user_instance_t *get_user(sdata_t *sdata, const char *username) -{ - bool dummy = false; - - return get_create_user(sdata, username, &dummy); -} - -static worker_instance_t *__create_worker(user_instance_t *user, const char *workername) -{ - worker_instance_t *worker = ckzalloc(sizeof(worker_instance_t)); - - worker->workername = strdup(workername); - worker->user_instance = user; - DL_APPEND(user->worker_instances, worker); - worker->start_time = time(NULL); - return worker; -} - -static worker_instance_t *__get_worker(user_instance_t *user, const char *workername) -{ - worker_instance_t *worker = NULL, *tmp; - - DL_FOREACH(user->worker_instances, tmp) { - if (!safecmp(workername, tmp->workername)) { - worker = tmp; - break; - } - } - return worker; -} - -/* Find worker amongst a user's workers by 
workername or create one if it - * doesn't yet exist. */ -static worker_instance_t *get_create_worker(sdata_t *sdata, user_instance_t *user, - const char *workername, bool *new_worker) -{ - worker_instance_t *worker; - - ck_wlock(&sdata->instance_lock); - worker = __get_worker(user, workername); - if (!worker) { - worker = __create_worker(user, workername); - *new_worker = true; - } - ck_wunlock(&sdata->instance_lock); - - return worker; -} - -static worker_instance_t *get_worker(sdata_t *sdata, user_instance_t *user, const char *workername) -{ - bool dummy = false; - - return get_create_worker(sdata, user, workername, &dummy); -} - -/* This simply strips off the first part of the workername and matches it to a - * user or creates a new one. Needs to be entered with client holding a ref - * count. */ -static user_instance_t *generate_user(ckpool_t *ckp, stratum_instance_t *client, - const char *workername) -{ - char *base_username = strdupa(workername), *username; - bool new_user = false, new_worker = false; - sdata_t *sdata = ckp->sdata; - worker_instance_t *worker; - user_instance_t *user; - int len; - - username = strsep(&base_username, "._"); - if (!username || !strlen(username)) - username = base_username; - len = strlen(username); - if (unlikely(len > 127)) - username[127] = '\0'; - - user = get_create_user(sdata, username, &new_user); - worker = get_create_worker(sdata, user, workername, &new_worker); - - /* Create one worker instance for combined data from workers of the - * same name */ - ck_wlock(&sdata->instance_lock); - client->user_instance = user; - client->worker_instance = worker; - DL_APPEND2(user->clients, client, user_prev, user_next); - __inc_worker(sdata,user, worker); - ck_wunlock(&sdata->instance_lock); - - if (!ckp->proxy && (new_user || !user->btcaddress)) { - /* Is this a btc address based username? 
*/ - if (generator_checkaddr(ckp, username, &user->script, &user->segwit)) { - user->btcaddress = true; - user->txnlen = address_to_txn(user->txnbin, username, user->script, user->segwit); - } - } - if (new_user) { - LOGNOTICE("Added new user %s%s", username, user->btcaddress ? - " as address based registration" : ""); - } - - return user; -} - -static void check_global_user(ckpool_t *ckp, user_instance_t *user, stratum_instance_t *client) -{ - sdata_t *sdata = ckp->sdata; - proxy_t *proxy = best_proxy(sdata); - int proxyid = proxy->id; - char buf[256]; - - sprintf(buf, "globaluser=%d:%d:%"PRId64":%s,%s", proxyid, user->id, client->id, - user->username, client->password); - send_proc(ckp->generator,buf); -} - -/* Manage the response to auth, client must hold ref */ -static void client_auth(ckpool_t *ckp, stratum_instance_t *client, user_instance_t *user, - const bool ret) -{ - if (ret) { - client->authorised = ret; - user->authorised = ret; - if (ckp->proxy) { - LOGNOTICE("Authorised client %s to proxy %d:%d, worker %s as user %s", - client->identity, client->proxyid, client->subproxyid, - client->workername, user->username); - if (ckp->userproxy) - check_global_user(ckp, user, client); - } else { - LOGNOTICE("Authorised client %s %s worker %s as user %s", - client->identity, client->address, client->workername, - user->username); - } - user->failed_authtime = 0; - user->auth_backoff = DEFAULT_AUTH_BACKOFF; /* Reset auth backoff time */ - user->throttled = false; - if (!user->auth_time) - user->auth_time = time(NULL); - } else { - if (user->throttled) { - LOGINFO("Client %s %s worker %s failed to authorise as throttled user %s", - client->identity, client->address, client->workername, - user->username); - } else { - LOGNOTICE("Client %s %s worker %s failed to authorise as user %s", - client->identity, client->address, client->workername, - user->username); - } - user->failed_authtime = time(NULL); - user->auth_backoff <<= 1; - /* Cap backoff time to 10 mins */ - if 
(user->auth_backoff > 600) - user->auth_backoff = 600; - client->reject = 3; - } - /* We can set this outside of lock safely */ - client->authorising = false; -} - -static json_t *__user_notify(const workbase_t *wb, const user_instance_t *user, const bool clean); - -static void update_solo_client(sdata_t *sdata, workbase_t *wb, const int64_t client_id, - user_instance_t *user_instance) -{ - json_t *json_msg = __user_notify(wb, user_instance, true); - - stratum_add_send(sdata, json_msg, client_id, SM_UPDATE); -} - -/* Needs to be entered with client holding a ref count. */ -static json_t *parse_authorise(stratum_instance_t *client, const json_t *params_val, - json_t **err_val) -{ - user_instance_t *user; - ckpool_t *ckp = client->ckp; - const char *buf, *pass; - bool ret = false; - int arr_size; - ts_t now; - - if (unlikely(!json_is_array(params_val))) { - *err_val = json_string("params not an array"); - goto out; - } - arr_size = json_array_size(params_val); - if (unlikely(arr_size < 1)) { - *err_val = json_string("params missing array entries"); - goto out; - } - if (unlikely(!client->useragent)) { - *err_val = json_string("Failed subscription"); - goto out; - } - buf = json_string_value(json_array_get(params_val, 0)); - if (!buf) { - *err_val = json_string("Invalid workername parameter"); - goto out; - } - if (!strlen(buf)) { - *err_val = json_string("Empty workername parameter"); - goto out; - } - if (!memcmp(buf, ".", 1) || !memcmp(buf, "_", 1)) { - *err_val = json_string("Empty username parameter"); - goto out; - } - if (strchr(buf, '/')) { - *err_val = json_string("Invalid character in username"); - goto out; - } - pass = json_string_value(json_array_get(params_val, 1)); - user = generate_user(ckp, client, buf); - client->user_id = user->id; - ts_realtime(&now); - client->start_time = now.tv_sec; - /* NOTE workername is NULL prior to this so should not be used in code - * till after this point */ - client->workername = strdup(buf); - if (pass) - 
client->password = strndup(pass, 64); - else - client->password = strdup(""); - if (user->failed_authtime) { - time_t now_t = time(NULL); - - if (now_t < user->failed_authtime + user->auth_backoff) { - if (!user->throttled) { - user->throttled = true; - LOGNOTICE("Client %s %s worker %s rate limited due to failed auth attempts", - client->identity, client->address, buf); - } else{ - LOGINFO("Client %s %s worker %s rate limited due to failed auth attempts", - client->identity, client->address, buf); - } - client->dropped = true; - goto out; - } - } - if (!ckp->btcsolo || client->user_instance->btcaddress) - ret = true; - - /* We do the preauth etc. in remote mode, and leave final auth to - * upstream pool to complete. */ - if (!ckp->remote || ckp->btcsolo) - client_auth(ckp, client, user, ret); -out: - if (ckp->btcsolo && ret && !client->remote) { - sdata_t *sdata = ckp->sdata; - workbase_t *wb; - - /* To avoid grabbing recursive lock */ - ck_wlock(&sdata->workbase_lock); - wb = sdata->current_workbase; - wb->readcount++; - ck_wunlock(&sdata->workbase_lock); - - ck_wlock(&sdata->instance_lock); - __generate_userwb(sdata, wb, user); - ck_wunlock(&sdata->instance_lock); - - update_solo_client(sdata, wb, client->id, user); - - ck_wlock(&sdata->workbase_lock); - wb->readcount--; - ck_wunlock(&sdata->workbase_lock); - - stratum_send_diff(sdata, client); - } - return json_boolean(ret); -} - -/* Needs to be entered with client holding a ref count. */ -static void stratum_send_diff(sdata_t *sdata, const stratum_instance_t *client) -{ - json_t *json_msg; - - JSON_CPACK(json_msg, "{s[I]soss}", "params", client->diff, "id", json_null(), - "method", "mining.set_difficulty"); - stratum_add_send(sdata, json_msg, client->id, SM_DIFF); -} - -/* Needs to be entered with client holding a ref count. 
*/ -static void stratum_send_message(sdata_t *sdata, const stratum_instance_t *client, const char *msg) -{ - json_t *json_msg; - - /* Only send messages to whitelisted clients */ - if (!client->messages) - return; - JSON_CPACK(json_msg, "{sosss[s]}", "id", json_null(), "method", "client.show_message", - "params", msg); - stratum_add_send(sdata, json_msg, client->id, SM_MSG); -} - -static double time_bias(const double tdiff, const double period) -{ - double dexp = tdiff / period; - - /* Sanity check to prevent silly numbers for double accuracy **/ - if (unlikely(dexp > 36)) - dexp = 36; - return 1.0 - 1.0 / exp(dexp); -} - -/* Needs to be entered with client holding a ref count. */ -static void add_submit(ckpool_t *ckp, stratum_instance_t *client, const double diff, const bool valid, - const bool submit) -{ - sdata_t *ckp_sdata = ckp->sdata, *sdata = client->sdata; - worker_instance_t *worker = client->worker_instance; - double tdiff, bdiff, dsps, drr, network_diff, bias; - user_instance_t *user = client->user_instance; - int64_t next_blockid, optimal, mindiff; - tv_t now_t; - - mutex_lock(&ckp_sdata->uastats_lock); - if (valid) { - ckp_sdata->stats.unaccounted_shares++; - ckp_sdata->stats.unaccounted_diff_shares += diff; - } else - ckp_sdata->stats.unaccounted_rejects += diff; - mutex_unlock(&ckp_sdata->uastats_lock); - - /* Count only accepted and stale rejects in diff calculation. 
*/ - if (valid) { - worker->shares += diff; - user->shares += diff; - } else if (!submit) - return; - - tv_time(&now_t); - - ck_rlock(&sdata->workbase_lock); - next_blockid = sdata->workbase_id + 1; - if (ckp->proxy) - network_diff = sdata->current_workbase->diff; - else - network_diff = sdata->current_workbase->network_diff; - ck_runlock(&sdata->workbase_lock); - - if (unlikely(!client->first_share.tv_sec)) { - copy_tv(&client->first_share, &now_t); - copy_tv(&client->ldc, &now_t); - } - - decay_client(client, diff, &now_t); - copy_tv(&client->last_share, &now_t); - - decay_worker(worker, diff, &now_t); - copy_tv(&worker->last_share, &now_t); - worker->idle = false; - - decay_user(user, diff, &now_t); - copy_tv(&user->last_share, &now_t); - client->idle = false; - - /* Once we've updated user/client statistics in node mode, we can't - * alter diff ourselves. */ - if (ckp->node) - return; - - client->ssdc++; - bdiff = sane_tdiff(&now_t, &client->first_share); - bias = time_bias(bdiff, 300); - tdiff = sane_tdiff(&now_t, &client->ldc); - - /* Check the difficulty every 240 seconds or as many shares as we - * should have had in that time, whichever comes first. */ - if (client->ssdc < 72 && tdiff < 240) - return; - - if (diff != client->diff) { - client->ssdc = 0; - return; - } - - /* Diff rate ratio */ - dsps = client->dsps5 / bias; - drr = dsps / (double)client->diff; - - /* Optimal rate product is 0.3, allow some hysteresis. 
*/ - if (drr > 0.15 && drr < 0.4) - return; - - /* Client suggest diff overrides worker mindiff */ - if (client->suggest_diff) - mindiff = client->suggest_diff; - else - mindiff = worker->mindiff; - /* Allow slightly lower diffs when users choose their own mindiff */ - if (mindiff) { - if (drr < 0.5) - return; - optimal = lround(dsps * 2.4); - } else - optimal = lround(dsps * 3.33); - - /* Clamp to mindiff ~ network_diff */ - - /* Set to higher of pool mindiff and optimal */ - optimal = MAX(optimal, ckp->mindiff); - - /* Set to higher of optimal and user chosen diff */ - optimal = MAX(optimal, mindiff); - - /* Set to lower of optimal and pool maxdiff */ - if (ckp->maxdiff) - optimal = MIN(optimal, ckp->maxdiff); - - /* Set to lower of optimal and network_diff */ - optimal = MIN(optimal, network_diff); - - if (unlikely(optimal < 1)) - return; - - if (client->diff == optimal) - return; - - /* If this is the first share in a change, reset the last diff change - * to make sure the client hasn't just fallen back after a leave of - * absence */ - if (optimal < client->diff && client->ssdc == 1) { - copy_tv(&client->ldc, &now_t); - return; - } - - client->ssdc = 0; - - LOGINFO("Client %s biased dsps %.2f dsps %.2f drr %.2f adjust diff from %"PRId64" to: %"PRId64" ", - client->identity, dsps, client->dsps5, drr, client->diff, optimal); - - copy_tv(&client->ldc, &now_t); - client->diff_change_job_id = next_blockid; - client->old_diff = client->diff; - client->diff = optimal; - stratum_send_diff(sdata, client); -} - -static void -downstream_block(ckpool_t *ckp, sdata_t *sdata, const json_t *val, const int cblen, - const char *coinbase, const uchar *data) -{ - json_t *block_val = json_deep_copy(val); - - /* Strip unnecessary fields and add extra fields needed */ - json_set_string(block_val, "method", stratum_msgs[SM_BLOCK]); - add_remote_blockdata(ckp, block_val, cblen, coinbase, data); - downstream_json(sdata, block_val, 0, SSEND_PREPEND); - json_decref(block_val); -} - -/* 
We should already be holding a wb readcount. Needs to be entered with - * client holding a ref count. */ -static void -test_blocksolve(const stratum_instance_t *client, const workbase_t *wb, const uchar *data, - const uchar *hash, const double diff, const char *coinbase, int cblen, - const char *nonce2, const char *nonce, const uint32_t ntime32, const uint32_t version_mask, - const bool stale) -{ - char blockhash[68], cdfield[64], *gbt_block; - sdata_t *sdata = client->sdata; - ckpool_t *ckp = wb->ckp; - double network_diff; - json_t *val = NULL; - uchar flip32[32]; - ts_t ts_now; - bool ret; - - /* Submit anything over 99.9% of the diff in case of rounding errors */ - network_diff = sdata->current_workbase->network_diff * 0.999; - if (likely(diff < network_diff)) - return; - - LOGWARNING("Possible %sblock solve diff %lf !", stale ? "stale share " : "", diff); - /* Can't submit a block in proxy mode without the transactions */ - if (!ckp->node && wb->proxy) - return; - - ts_realtime(&ts_now); - sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); - - gbt_block = process_block(wb, coinbase, cblen, data, hash, flip32, blockhash); - send_node_block(ckp, sdata, client->enonce1, nonce, nonce2, ntime32, version_mask, - wb->id, diff, client->id, coinbase, cblen, data); - - val = json_object(); - json_set_int(val, "height", wb->height); - json_set_string(val,"blockhash", blockhash); - json_set_string(val,"confirmed", "n"); - json_set_int64(val, "workinfoid", wb->id); - json_set_string(val, "username", client->user_instance->username); - json_set_string(val, "workername", client->workername); - if (ckp->remote) - json_set_int64(val, "clientid", client->virtualid); - else - json_set_int64(val, "clientid", client->id); - json_set_string(val, "enonce1", client->enonce1); - json_set_string(val, "nonce2", nonce2); - json_set_string(val, "nonce", nonce); - json_set_uint32(val, "ntime32", ntime32); - json_set_uint32(val, "version_mask", version_mask); - json_set_int64(val, 
"reward", wb->coinbasevalue); - json_set_double(val, "diff", diff); - json_set_string(val, "createdate", cdfield); - json_set_string(val, "createby", "code"); - json_set_string(val, "createcode", __func__); - json_set_string(val, "createinet", ckp->serverurl[client->server]); - - if (ckp->remote) { - add_remote_blockdata(ckp, val, cblen, coinbase, data); - upstream_json_msgtype(ckp, val, SM_BLOCK); - } else { - downstream_block(ckp, sdata, val, cblen, coinbase, data); - } - - /* Submit block locally after sending it to remote locations avoiding - * the delay of local verification */ - ret = local_block_submit(ckp, gbt_block, flip32, wb->height); - if (ret) - block_solve(ckp, val); - else - block_reject(val); - - json_decref(val); -} - -/* Entered with instance_lock held */ -static inline uchar *__user_coinb2(const stratum_instance_t *client, const workbase_t *wb, int *cb2len) -{ - struct userwb *userwb; - int64_t id; - - if (!client->ckp->btcsolo) - goto out_nouserwb; - - id = wb->id; - HASH_FIND_I64(client->user_instance->userwbs, &id, userwb); - if (unlikely(!userwb)) - goto out_nouserwb; - *cb2len = userwb->coinb2len; - return userwb->coinb2bin; - -out_nouserwb: - *cb2len = wb->coinb2len; - return wb->coinb2bin; -} - -/* Needs to be entered with workbase readcount and client holding a ref count. 
*/ -static double submission_diff(sdata_t *sdata, const stratum_instance_t *client, const workbase_t *wb, - const char *nonce2, const uint32_t ntime32, uint32_t version_mask, - const char *nonce, uchar *hash, const bool stale) -{ - unsigned char merkle_root[32], merkle_sha[64]; - uint32_t *data32, *swap32, benonce32; - char *coinbase, data[80]; - uchar swap[80], hash1[32]; - int cblen, i, cb2len; - uchar *coinb2bin; - double ret; - - /* Leave ample enough room for donation generation address (~25) + length counter + user generation - * wb->coinb1len + wb->enonce1constlen + wb->enonce1varlen + wb->enonce2varlen + wb->coinb2len + 25 + cb2len */ - - coinbase = alloca(1024); - memcpy(coinbase, wb->coinb1bin, wb->coinb1len); - cblen = wb->coinb1len; - memcpy(coinbase + cblen, &client->enonce1bin, wb->enonce1constlen + wb->enonce1varlen); - cblen += wb->enonce1constlen + wb->enonce1varlen; - hex2bin(coinbase + cblen, nonce2, wb->enonce2varlen); - cblen += wb->enonce2varlen; - - ck_rlock(&sdata->instance_lock); - coinb2bin = __user_coinb2(client, wb, &cb2len); - memcpy(coinbase + cblen, coinb2bin, cb2len); - ck_runlock(&sdata->instance_lock); - - cblen += cb2len; - - gen_hash((uchar *)coinbase, merkle_root, cblen); - memcpy(merkle_sha, merkle_root, 32); - for (i = 0; i < wb->merkles; i++) { - memcpy(merkle_sha + 32, &wb->merklebin[i], 32); - gen_hash(merkle_sha, merkle_root, 64); - memcpy(merkle_sha, merkle_root, 32); - } - data32 = (uint32_t *)merkle_sha; - swap32 = (uint32_t *)merkle_root; - flip_32(swap32, data32); - - /* Copy the cached header binary and insert the merkle root */ - memcpy(data, wb->headerbin, 80); - memcpy(data + 36, merkle_root, 32); - - /* Update nVersion when version_mask is in use */ - if (version_mask) { - version_mask = htobe32(version_mask); - data32 = (uint32_t *)data; - *data32 |= version_mask; - } - - /* Insert the nonce value into the data */ - hex2bin(&benonce32, nonce, 4); - data32 = (uint32_t *)(data + 64 + 12); - *data32 = benonce32; - 
- /* Insert the ntime value into the data */ - data32 = (uint32_t *)(data + 68); - *data32 = htobe32(ntime32); - - /* Hash the share */ - data32 = (uint32_t *)data; - swap32 = (uint32_t *)swap; - flip_80(swap32, data32); - sha256(swap, 80, hash1); - sha256(hash1, 32, hash); - - /* Calculate the diff of the share here */ - ret = diff_from_target(hash); - - /* Test we haven't solved a block regardless of share status */ - test_blocksolve(client, wb, swap, hash, ret, coinbase, cblen, nonce2, nonce, ntime32, version_mask, stale); - - return ret; -} - -/* Optimised for the common case where shares are new */ -static bool new_share(sdata_t *sdata, const uchar *hash, const int64_t wb_id) -{ - share_t *share = ckzalloc(sizeof(share_t)), *match = NULL; - bool ret = true; - - memcpy(share->hash, hash, 32); - share->workbase_id = wb_id; - - mutex_lock(&sdata->share_lock); - sdata->shares_generated++; - HASH_FIND(hh, sdata->shares, hash, 32, match); - if (likely(!match)) - HASH_ADD(hh, sdata->shares, hash, 32, share); - mutex_unlock(&sdata->share_lock); - - if (unlikely(match)) { - dealloc(share); - ret = false; - } - return ret; -} - -static void update_client(const stratum_instance_t *client, const int64_t client_id); - -/* Submit a share in proxy mode to the parent pool. workbase_lock is held. - * Needs to be entered with client holding a ref count. 
*/ -static void submit_share(stratum_instance_t *client, const int64_t jobid, const char *nonce2, - const char *ntime, const char *nonce) -{ - ckpool_t *ckp = client->ckp; - json_t *json_msg; - char enonce2[32]; - - sprintf(enonce2, "%s%s", client->enonce1var, nonce2); - JSON_CPACK(json_msg, "{sIsssssssIsIsi}", "jobid", jobid, "nonce2", enonce2, - "ntime", ntime, "nonce", nonce, "client_id", client->id, - "proxy", client->proxyid, "subproxy", client->subproxyid); - generator_add_send(ckp, json_msg); -} - -static void check_best_diff(sdata_t *sdata, user_instance_t *user,worker_instance_t *worker, - const double sdiff, stratum_instance_t *client) -{ - char buf[512]; - bool best_ever = false, best_worker = false, best_user = false; - - if (sdiff > user->best_ever) { - user->best_ever = sdiff; - best_ever = true; - } - if (sdiff > worker->best_ever) { - worker->best_ever = sdiff; - best_ever = true; - } - if (sdiff > worker->best_diff) { - worker->best_diff = sdiff; - best_worker = true; - } - if (sdiff > user->best_diff) { - user->best_diff = sdiff; - best_user = true; - } - /* Check against pool's best diff unlocked first, then recheck once - * the mutex is locked. */ - if (best_user && sdiff > sdata->stats.best_diff) { - /* Don't set pool best diff if it's a block since we will have - * reset it to zero. */ - mutex_lock(&sdata->stats_lock); - if (unlikely(sdiff > sdata->stats.best_diff && sdiff < sdata->current_workbase->network_diff)) - sdata->stats.best_diff = sdiff; - mutex_unlock(&sdata->stats_lock); - } - if (likely((!best_user && !best_worker) || !client)) - return; - snprintf(buf, 511, "New best %sshare for %s: %lf", best_ever ? "ever " : "", - best_user ? "user" : "worker", sdiff); - stratum_send_message(sdata, client, buf); -} - -#define JSON_ERR(err) json_string(SHARE_ERR(err)) - -/* Needs to be entered with client holding a ref count. 
*/ -static json_t *parse_submit(stratum_instance_t *client, json_t *json_msg, - const json_t *params_val, json_t **err_val) -{ - bool share = false, result = false, invalid = true, submit = false, stale = false; - const char *workername, *job_id, *ntime, *version_mask; - double diff = client->diff, wdiff = 0, sdiff = -1; - char hexhash[68] = {}, sharehash[32], cdfield[64]; - user_instance_t *user = client->user_instance; - char *fname = NULL, *s, *nonce, *nonce2; - uint32_t ntime32, version_mask32 = 0; - sdata_t *sdata = client->sdata; - enum share_err err = SE_NONE; - ckpool_t *ckp = client->ckp; - char idstring[24] = {}; - workbase_t *wb = NULL; - uchar hash[32]; - int nlen, len; - time_t now_t; - json_t *val; - int64_t id; - ts_t now; - FILE *fp; - - ts_realtime(&now); - now_t = now.tv_sec; - sprintf(cdfield, "%lu,%lu", now.tv_sec, now.tv_nsec); - - if (unlikely(!json_is_array(params_val))) { - err = SE_NOT_ARRAY; - *err_val = JSON_ERR(err); - goto out; - } - if (unlikely(json_array_size(params_val) < 5)) { - err = SE_INVALID_SIZE; - *err_val = JSON_ERR(err); - goto out; - } - workername = json_string_value(json_array_get(params_val, 0)); - if (unlikely(!workername || !strlen(workername))) { - err = SE_NO_USERNAME; - *err_val = JSON_ERR(err); - goto out; - } - job_id = json_string_value(json_array_get(params_val, 1)); - if (unlikely(!job_id || !strlen(job_id))) { - err = SE_NO_JOBID; - *err_val = JSON_ERR(err); - goto out; - } - nonce2 = (char *)json_string_value(json_array_get(params_val, 2)); - if (unlikely(!nonce2 || !strlen(nonce2) || !validhex(nonce2))) { - err = SE_NO_NONCE2; - *err_val = JSON_ERR(err); - goto out; - } - ntime = json_string_value(json_array_get(params_val, 3)); - if (unlikely(!ntime || !strlen(ntime) || !validhex(ntime))) { - err = SE_NO_NTIME; - *err_val = JSON_ERR(err); - goto out; - } - nonce = (char *)json_string_value(json_array_get(params_val, 4)); - if (unlikely(!nonce || strlen(nonce) < 8 || !validhex(nonce))) { - err = 
SE_NO_NONCE; - *err_val = JSON_ERR(err); - goto out; - } - - version_mask = json_string_value(json_array_get(params_val, 5)); - if (version_mask && strlen(version_mask) && validhex(version_mask)) { - sscanf(version_mask, "%x", &version_mask32); - // check version mask - if (version_mask32 && ((~ckp->version_mask) & version_mask32) != 0) { - // means client changed some bits which server doesn't allow to change - err = SE_INVALID_VERSION_MASK; - *err_val = JSON_ERR(err); - goto out; - } - } - if (safecmp(workername, client->workername)) { - err = SE_WORKER_MISMATCH; - *err_val = JSON_ERR(err); - goto out; - } - sscanf(job_id, "%lx", &id); - sscanf(ntime, "%x", &ntime32); - - share = true; - - if (unlikely(!sdata->current_workbase)) - return json_boolean(false); - - wb = get_workbase(sdata, id); - if (unlikely(!wb)) { - id = sdata->current_workbase->id; - err = SE_INVALID_JOBID; - json_set_string(json_msg, "reject-reason", SHARE_ERR(err)); - strncpy(idstring, job_id, 19); - ASPRINTF(&fname, "%s.sharelog", sdata->current_workbase->logdir); - goto out_nowb; - } - wdiff = wb->diff; - strncpy(idstring, wb->idstring, 20); - ASPRINTF(&fname, "%s.sharelog", wb->logdir); - /* Fix broken clients sending too many chars. Nonce2 is part of the - * read only json so use a temporary variable and modify it. */ - len = wb->enonce2varlen * 2; - nlen = strlen(nonce2); - if (unlikely(nlen != len)) { - if (nlen > len) { - nonce2 = strdupa(nonce2); - nonce2[len] = '\0'; - } else if (nlen < len) { - char *tmp = nonce2; - - nonce2 = strdupa("0000000000000000"); - memcpy(nonce2, tmp, nlen); - nonce2[len] = '\0'; - } - } - /* Same with nonce, but we need at least 8 chars. We checked for this - * earlier. 
*/ - len = 8; - nlen = strlen(nonce); - if (unlikely(nlen > len)) { - nonce = strdupa(nonce); - nonce[len] = '\0'; - } - if (id < sdata->blockchange_id) - stale = true; - sdiff = submission_diff(sdata, client, wb, nonce2, ntime32, version_mask32, nonce, hash, stale); - if (sdiff > client->best_diff) { - worker_instance_t *worker = client->worker_instance; - - client->best_diff = sdiff; - LOGINFO("User %s worker %s client %s new best diff %lf", user->username, - worker->workername, client->identity, sdiff); - check_best_diff(sdata, user, worker, sdiff, client); - } - bswap_256(sharehash, hash); - __bin2hex(hexhash, sharehash, 32); - - if (stale) { - /* Accept shares if they're received on remote nodes before the - * workbase was retired. */ - if (client->latency) { - int latency; - tv_t now_tv; - - ts_to_tv(&now_tv, &now); - latency = ms_tvdiff(&now_tv, &wb->retired); - if (latency < client->latency) { - LOGDEBUG("Accepting %dms late share from client %s", - latency, client->identity); - goto no_stale; - } - } - err = SE_STALE; - json_set_string(json_msg, "reject-reason", SHARE_ERR(err)); - goto out_submit; - } -no_stale: - /* Ntime cannot be less, but allow forward ntime rolling up to max */ - if (ntime32 < wb->ntime32 || ntime32 > wb->ntime32 + 7000) { - err = SE_NTIME_INVALID; - json_set_string(json_msg, "reject-reason", SHARE_ERR(err)); - goto out_put; - } - invalid = false; -out_submit: - if (sdiff >= wdiff) - submit = true; - if (unlikely(sdiff >= sdata->current_workbase->network_diff)) { - /* Make sure we always submit any possible block solve */ - LOGWARNING("Submitting possible block solve share diff %lf !", sdiff); - submit = true; - } -out_put: - put_workbase(sdata, wb); -out_nowb: - - /* Accept shares of the old diff until the next update */ - if (id < client->diff_change_job_id) - diff = client->old_diff; - if (!invalid) { - char wdiffsuffix[16]; - - suffix_string(wdiff, wdiffsuffix, 16, 0); - if (sdiff >= diff) { - if (new_share(sdata, hash, id)) { - 
LOGINFO("Accepted client %s share diff %.1f/%.0f/%s: %s", - client->identity, sdiff, diff, wdiffsuffix, hexhash); - result = true; - } else { - err = SE_DUPE; - json_set_string(json_msg, "reject-reason", SHARE_ERR(err)); - LOGINFO("Rejected client %s dupe diff %.1f/%.0f/%s: %s", - client->identity, sdiff, diff, wdiffsuffix, hexhash); - submit = false; - } - } else { - err = SE_HIGH_DIFF; - LOGINFO("Rejected client %s high diff %.1f/%.0f/%s: %s", - client->identity, sdiff, diff, wdiffsuffix, hexhash); - json_set_string(json_msg, "reject-reason", SHARE_ERR(err)); - submit = false; - } - } else - LOGINFO("Rejected client %s invalid share %s", client->identity, SHARE_ERR(err)); - - /* Submit share to upstream pool in proxy mode. We submit valid and - * stale shares and filter out the rest. */ - if (wb && wb->proxy && submit) { - LOGINFO("Submitting share upstream: %s", hexhash); - submit_share(client, id, nonce2, ntime, nonce); - } - - add_submit(ckp, client, diff, result, submit); - - /* Now write to the pool's sharelog. 
*/ - val = json_object(); - json_set_int(val, "workinfoid", id); - if (ckp->remote) - json_set_int64(val, "clientid", client->virtualid); - else - json_set_int64(val, "clientid", client->id); - json_set_string(val, "enonce1", client->enonce1); - json_set_string(val, "nonce2", nonce2); - json_set_string(val, "nonce", nonce); - json_set_string(val, "ntime", ntime); - json_set_double(val, "diff", diff); - json_set_double(val, "sdiff", sdiff); - json_set_string(val, "hash", hexhash); - json_set_bool(val, "result", result); - json_object_set(val, "reject-reason", json_object_get(json_msg, "reject-reason")); - json_object_set(val, "error", *err_val); - json_set_int(val, "errn", err); - json_set_string(val, "createdate", cdfield); - json_set_string(val, "createby", "code"); - json_set_string(val, "createcode", __func__); - json_set_string(val, "createinet", ckp->serverurl[client->server]); - json_set_string(val, "workername", client->workername); - json_set_string(val, "username", user->username); - json_set_string(val, "address", client->address); - json_set_string(val, "agent", client->useragent); - - if (ckp->logshares) { - fp = fopen(fname, "ae"); - if (likely(fp)) { - s = json_dumps(val, JSON_EOL); - len = strlen(s); - len = fprintf(fp, "%s", s); - free(s); - fclose(fp); - if (unlikely(len < 0)) - LOGERR("Failed to fwrite to %s", fname); - } else - LOGERR("Failed to fopen %s", fname); - } - if (ckp->remote) - upstream_json_msgtype(ckp, val, SM_SHARE); - json_decref(val); -out: - if (!sdata->wbincomplete && ((!result && !submit) || !share)) { - /* Is this the first in a run of invalids? 
*/ - if (client->first_invalid < client->last_share.tv_sec || !client->first_invalid) - client->first_invalid = now_t; - else if (client->first_invalid && client->first_invalid < now_t - 180 && client->reject < 3) { - LOGNOTICE("Client %s rejecting for 180s, disconnecting", client->identity); - if (ckp->node) - connector_drop_client(ckp, client->id); - else - stratum_send_message(sdata, client, "Disconnecting for continuous invalid shares"); - client->reject = 3; - } else if (client->first_invalid && client->first_invalid < now_t - 120 && client->reject < 2) { - LOGNOTICE("Client %s rejecting for 120s, reconnecting", client->identity); - stratum_send_message(sdata, client, "Reconnecting for continuous invalid shares"); - reconnect_client(sdata, client); - client->reject = 2; - } else if (client->first_invalid && client->first_invalid < now_t - 60 && !client->reject) { - LOGNOTICE("Client %s rejecting for 60s, sending update", client->identity); - update_client(client, client->id); - client->reject = 1; - } - } else if (client->reject < 3) { - client->first_invalid = 0; - client->reject = 0; - } - - if (!share) { - if (ckp->remote) { - val = json_object(); - if (ckp->remote) - json_set_int64(val, "clientid", client->virtualid); - else - json_set_int64(val, "clientid", client->id); - if (user->secondaryuserid) - json_set_string(val, "secondaryuserid", user->secondaryuserid); - json_set_string(val, "enonce1", client->enonce1); - json_set_int(val, "workinfoid", sdata->current_workbase->id); - json_set_string(val, "workername", client->workername); - json_set_string(val, "username", user->username); - json_object_set(val, "error", *err_val); - json_set_int(val, "errn", err); - json_set_string(val, "createdate", cdfield); - json_set_string(val, "createby", "code"); - json_set_string(val, "createcode", __func__); - json_set_string(val, "createinet", ckp->serverurl[client->server]); - json_decref(val); - } - LOGINFO("Invalid share from client %s: %s", client->identity, 
client->workername); - } - free(fname); - return json_boolean(result); -} - -/* Must enter with workbase_lock held */ -static json_t *__stratum_notify(const workbase_t *wb, const bool clean) -{ - json_t *val; - - JSON_CPACK(val, "{s:[ssssosssb],s:o,s:s}", - "params", - wb->idstring, - wb->prevhash, - wb->coinb1, - wb->coinb2, - json_deep_copy(wb->merkle_array), - wb->bbversion, - wb->nbit, - wb->ntime, - clean, - "id", json_null(), - "method", "mining.notify"); - return val; -} - -static void stratum_broadcast_update(sdata_t *sdata, const workbase_t *wb, const bool clean) -{ - json_t *json_msg; - - ck_rlock(&sdata->workbase_lock); - json_msg = __stratum_notify(wb, clean); - ck_runlock(&sdata->workbase_lock); - - stratum_broadcast(sdata, json_msg, SM_UPDATE); -} - -/* For sending a single stratum template update */ -static void stratum_send_update(sdata_t *sdata, const int64_t client_id, const bool clean) -{ - ckpool_t *ckp = sdata->ckp; - json_t *json_msg; - - if (unlikely(!sdata->current_workbase)) { - if (!ckp->proxy) - LOGWARNING("No current workbase to send stratum update"); - else - LOGDEBUG("No current workbase to send stratum update for client %"PRId64, client_id); - return; - } - - ck_rlock(&sdata->workbase_lock); - json_msg = __stratum_notify(sdata->current_workbase, clean); - ck_runlock(&sdata->workbase_lock); - - stratum_add_send(sdata, json_msg, client_id, SM_UPDATE); -} - -/* Hold instance and workbase lock */ -static json_t *__user_notify(const workbase_t *wb, const user_instance_t *user, const bool clean) -{ - int64_t id = wb->id; - struct userwb *userwb; - json_t *val; - - HASH_FIND_I64(user->userwbs, &id, userwb); - if (unlikely(!userwb)) { - LOGINFO("Failed to find userwb in __user_notify!"); - return NULL; - } - - JSON_CPACK(val, "{s:[ssssosssb],s:o,s:s}", - "params", - wb->idstring, - wb->prevhash, - wb->coinb1, - userwb->coinb2, - json_deep_copy(wb->merkle_array), - wb->bbversion, - wb->nbit, - wb->ntime, - clean, - "id", json_null(), - 
"method", "mining.notify"); - return val; -} - -/* Sends a stratum update with a unique coinb2 for every client. Avoid - * recursive locking. */ -static void stratum_broadcast_updates(sdata_t *sdata, bool clean) -{ - stratum_instance_t *client, *tmp; - json_t *json_msg; - - ck_wlock(&sdata->instance_lock); - HASH_ITER(hh, sdata->stratum_instances, client, tmp) { - if (!client->user_instance) - continue; - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - - ck_rlock(&sdata->workbase_lock); - json_msg = __user_notify(sdata->current_workbase, client->user_instance, clean); - ck_runlock(&sdata->workbase_lock); - - if (likely(json_msg)) - stratum_add_send(sdata, json_msg, client->id, SM_UPDATE); - - ck_wlock(&sdata->instance_lock); - __dec_instance_ref(client); - } - ck_wunlock(&sdata->instance_lock); -} - -static void send_json_err(sdata_t *sdata, const int64_t client_id, json_t *id_val, const char *err_msg) -{ - json_t *val; - - /* Some clients have no id_val so pass back an empty string. */ - if (unlikely(!id_val)) - JSON_CPACK(val, "{ssss}", "id", "", "error", err_msg); - else - JSON_CPACK(val, "{soss}", "id", json_deep_copy(id_val), "error", err_msg); - stratum_add_send(sdata, val, client_id, SM_ERROR); -} - -/* Needs to be entered with client holding a ref count. 
*/ -static void update_client(const stratum_instance_t *client, const int64_t client_id) -{ - sdata_t *sdata = client->sdata; - - if (!client->ckp->btcsolo) - stratum_send_update(sdata, client_id, true); - stratum_send_diff(sdata, client); -} - -static json_params_t -*create_json_params(const int64_t client_id, const json_t *method, const json_t *params, - const json_t *id_val) -{ - json_params_t *jp = ckalloc(sizeof(json_params_t)); - - jp->method = json_deep_copy(method); - jp->params = json_deep_copy(params); - jp->id_val = json_deep_copy(id_val); - jp->client_id = client_id; - return jp; -} - -/* Implement support for the diff in the params as well as the originally - * documented form of placing diff within the method. Needs to be entered with - * client holding a ref count. */ -static void suggest_diff(ckpool_t *ckp, stratum_instance_t *client, const char *method, - const json_t *params_val) -{ - json_t *arr_val = json_array_get(params_val, 0); - int64_t sdiff; - - if (unlikely(!client_active(client))) { - LOGNOTICE("Attempted to suggest diff on unauthorised client %s", client->identity); - return; - } - if (arr_val && json_is_integer(arr_val)) - sdiff = json_integer_value(arr_val); - else if (sscanf(method, "mining.suggest_difficulty(%"PRId64, &sdiff) != 1) { - LOGINFO("Failed to parse suggest_difficulty for client %s", client->identity); - return; - } - /* Clamp suggest diff to global pool mindiff */ - if (sdiff < ckp->mindiff) - sdiff = ckp->mindiff; - if (sdiff == client->suggest_diff) - return; - client->suggest_diff = sdiff; - if (client->diff == sdiff) - return; - client->diff_change_job_id = client->sdata->workbase_id + 1; - client->old_diff = client->diff; - client->diff = sdiff; - stratum_send_diff(ckp->sdata, client); -} - -/* Send diff first when sending the first stratum template after subscribing */ -static void init_client(const stratum_instance_t *client, const int64_t client_id) -{ - sdata_t *sdata = client->sdata; - - 
stratum_send_diff(sdata, client); - if (!client->ckp->btcsolo) - stratum_send_update(sdata, client_id, true); -} - -/* When a node first connects it has no transactions so we have to send all - * current ones to it. */ -static void send_node_all_txns(sdata_t *sdata, const stratum_instance_t *client) -{ - json_t *txn_array, *val, *txn_val; - txntable_t *txn, *tmp; - smsg_t *msg; - - txn_array = json_array(); - - ck_rlock(&sdata->txn_lock); - HASH_ITER(hh, sdata->txns, txn, tmp) { - JSON_CPACK(txn_val, "{ss,ss}", "hash", txn->hash, "data", txn->data); - json_array_append_new(txn_array, txn_val); - } - ck_runlock(&sdata->txn_lock); - - if (client->trusted) { - JSON_CPACK(val, "{ss,so}", "method", stratum_msgs[SM_TRANSACTIONS], - "transaction", txn_array); - } else { - JSON_CPACK(val, "{ss,so}", "node.method", stratum_msgs[SM_TRANSACTIONS], - "transaction", txn_array); - } - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = val; - msg->client_id = client->id; - ckmsgq_add(sdata->ssends, msg); - LOGNOTICE("Sending new node client %s all transactions", client->identity); -} - -static void *setup_node(void *arg) -{ - stratum_instance_t *client = (stratum_instance_t *)arg; - - pthread_detach(pthread_self()); - - client->latency = round_trip(client->address) / 2; - LOGNOTICE("Node client %s %s latency set to %dms", client->identity, - client->address, client->latency); - send_node_all_txns(client->sdata, client); - dec_instance_ref(client->sdata, client); - return NULL; -} - -/* Create a thread to asynchronously set latency to the node to not - * block. Increment the ref count to prevent the client pointer - * dereferencing under us, allowing the thread to decrement it again when - * finished. 
*/ -static void add_mining_node(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client) -{ - pthread_t pth; - - ck_wlock(&sdata->instance_lock); - client->node = true; - DL_APPEND2(sdata->node_instances, client, node_prev, node_next); - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - - LOGWARNING("Added client %s %s as mining node on server %d:%s", client->identity, - client->address, client->server, ckp->serverurl[client->server]); - - create_pthread(&pth, setup_node, client); -} - -static void add_remote_server(sdata_t *sdata, stratum_instance_t *client) -{ - ck_wlock(&sdata->instance_lock); - client->trusted = true; - DL_APPEND2(sdata->remote_instances, client, remote_prev, remote_next); - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - - send_node_all_txns(sdata, client); - dec_instance_ref(sdata, client); -} - -/* Enter with client holding ref count */ -static void parse_method(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client, - const int64_t client_id, json_t *id_val, json_t *method_val, - json_t *params_val) -{ - const char *method; - - /* Random broken clients send something not an integer as the id so we - * copy the json item for id_val as is for the response. 
By far the - * most common messages will be shares so look for those first */ - method = json_string_value(method_val); - if (likely(cmdmatch(method, "mining.submit") && client->authorised)) { - json_params_t *jp = create_json_params(client_id, method_val, params_val, id_val); - - ckmsgq_add(sdata->sshareq, jp); - return; - } - - if (cmdmatch(method, "mining.term")) { - LOGDEBUG("Mining terminate requested from %s %s", client->identity, client->address); - drop_client(ckp, sdata, client_id); - return; - } - - if (cmdmatch(method, "mining.subscribe")) { - json_t *val, *result_val; - - if (unlikely(client->subscribed)) { - LOGNOTICE("Client %s %s trying to subscribe twice", - client->identity, client->address); - return; - } - result_val = parse_subscribe(client, client_id, params_val); - /* Shouldn't happen, sanity check */ - if (unlikely(!result_val)) { - LOGWARNING("parse_subscribe returned NULL result_val"); - return; - } - val = json_object(); - json_object_set_new_nocheck(val, "result", result_val); - json_object_set_nocheck(val, "id", id_val); - json_object_set_new_nocheck(val, "error", json_null()); - stratum_add_send(sdata, val, client_id, SM_SUBSCRIBERESULT); - if (likely(client->subscribed)) - init_client(client, client_id); - return; - } - - if (unlikely(cmdmatch(method, "mining.remote"))) { - char buf[256]; - - /* Add this client as a trusted remote node in the connector and - * drop the client in the stratifier */ - if (!ckp->trusted[client->server] || ckp->proxy) { - LOGNOTICE("Dropping client %s %s trying to authorise as remote node on non trusted server %d", - client->identity, client->address, client->server); - connector_drop_client(ckp, client_id); - } else { - snprintf(buf, 255, "remote=%"PRId64, client_id); - send_proc(ckp->connector, buf); - add_remote_server(sdata, client); - } - sprintf(client->identity, "remote:%"PRId64, client_id); - return; - } - - if (unlikely(cmdmatch(method, "mining.node"))) { - char buf[256]; - - /* Add this client as 
a passthrough in the connector and - * add it to the list of mining nodes in the stratifier */ - if (!ckp->nodeserver[client->server] || ckp->proxy) { - LOGNOTICE("Dropping client %s %s trying to authorise as node on non node server %d", - client->identity, client->address, client->server); - connector_drop_client(ckp, client_id); - drop_client(ckp, sdata, client_id); - } else { - snprintf(buf, 255, "passthrough=%"PRId64, client_id); - send_proc(ckp->connector, buf); - add_mining_node(ckp, sdata, client); - sprintf(client->identity, "node:%"PRId64, client_id); - } - return; - } - - if (unlikely(cmdmatch(method, "mining.passthrough"))) { - char buf[256]; - - if (ckp->proxy || ckp->node ) { - LOGNOTICE("Dropping client %s %s trying to connect as passthrough on unsupported server %d", - client->identity, client->address, client->server); - connector_drop_client(ckp, client_id); - drop_client(ckp, sdata, client_id); - } else { - /*Flag this as a passthrough and manage its messages - * accordingly. No data from this client id should ever - * come directly back to this stratifier. */ - LOGNOTICE("Adding passthrough client %s %s", client->identity, client->address); - client->passthrough = true; - snprintf(buf, 255, "passthrough=%"PRId64, client_id); - send_proc(ckp->connector, buf); - sprintf(client->identity, "passthrough:%"PRId64, client_id); - } - return; - } - - /* We shouldn't really allow unsubscribed users to authorise first but - * some broken stratum implementations do that and we can handle it. 
*/ - if (cmdmatch(method, "mining.auth")) { - json_params_t *jp; - - if (unlikely(client->authorised)) { - LOGINFO("Client %s %s trying to authorise twice", - client->identity, client->address); - return; - } - jp = create_json_params(client_id, method_val, params_val, id_val); - ckmsgq_add(sdata->sauthq, jp); - return; - } - - if (cmdmatch(method, "mining.configure")) { - json_t *val, *result_val; - char version_str[12]; - - LOGINFO("Mining configure requested from %s %s", client->identity, - client->address); - sprintf(version_str, "%08x", ckp->version_mask); - val = json_object(); - JSON_CPACK(result_val, "{sbss}", "version-rolling", json_true(), - "version-rolling.mask", version_str); - json_object_set_new_nocheck(val, "result", result_val); - json_object_set_nocheck(val, "id", id_val); - json_object_set_new_nocheck(val, "error", json_null()); - stratum_add_send(sdata, val, client_id, SM_CONFIGURE); - return; - } - - /* We should only accept requests from subscribed and authed users here - * on */ - if (!client->subscribed) { - LOGINFO("Dropping %s from unsubscribed client %s %s", method, - client->identity, client->address); - connector_drop_client(ckp, client_id); - return; - } - - /* We should only accept authorised requests from here on */ - if (!client->authorised) { - LOGINFO("Dropping %s from unauthorised client %s %s", method, - client->identity, client->address); - return; - } - - if (cmdmatch(method, "mining.suggest")) { - suggest_diff(ckp, client, method, params_val); - return; - } - - /* Covers both get_transactions and get_txnhashes */ - if (cmdmatch(method, "mining.get")) { - json_params_t *jp = create_json_params(client_id, method_val, params_val, id_val); - - ckmsgq_add(sdata->stxnq, jp); - return; - } - - /* Unhandled message here */ - LOGINFO("Unhandled client %s %s method %s", client->identity, client->address, method); - return; -} - -static void free_smsg(smsg_t *msg) -{ - json_decref(msg->json_msg); - free(msg); -} - -/* Even though we 
check the results locally in node mode, check the upstream - * results in case of runs of invalids. */ -static void parse_share_result(ckpool_t *ckp, stratum_instance_t *client, json_t *val) -{ - time_t now_t; - ts_t now; - - if (likely(json_is_true(val))) { - client->upstream_invalid = 0; - return; - } - ts_realtime(&now); - now_t = now.tv_sec; - if (client->upstream_invalid < client->last_share.tv_sec || !client->upstream_invalid) - client->upstream_invalid = now_t; - else if (client->upstream_invalid && client->upstream_invalid < now_t - 150) { - LOGNOTICE("Client %s upstream rejects for 150s, disconnecting", client->identity); - connector_drop_client(ckp, client->id); - client->reject = 3; - } -} - -static void parse_diff(stratum_instance_t *client, json_t *val) -{ - double diff = json_number_value(json_array_get(val, 0)); - - LOGINFO("Set client %s to diff %lf", client->identity, diff); - client->diff = diff; -} - -static void parse_subscribe_result(stratum_instance_t *client, json_t *val) -{ - int len; - - strncpy(client->enonce1, json_string_value(json_array_get(val, 1)), 16); - len = strlen(client->enonce1) / 2; - hex2bin(client->enonce1bin, client->enonce1, len); - memcpy(&client->enonce1_64, client->enonce1bin, 8); - LOGINFO("Client %s got enonce1 %lx string %s", client->identity, client->enonce1_64, client->enonce1); -} - -static void parse_authorise_result(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client, - json_t *val) -{ - if (!json_is_true(val)) { - LOGNOTICE("Client %s was not authorised upstream, dropping", client->identity); - client->authorised = false; - connector_drop_client(ckp, client->id); - drop_client(ckp, sdata, client->id); - } else - LOGINFO("Client %s was authorised upstream", client->identity); -} - -static int node_msg_type(json_t *val) -{ - const char *method; - int i, ret = -1; - - if (!val) - goto out; - method = json_string_value(json_object_get(val, "node.method")); - if (method) { - for (i = 0; i < SM_NONE; i++) { - if 
(!strcmp(method, stratum_msgs[i])) { - ret = i; - break; - } - } - json_object_del(val, "node.method"); - } else - method = json_string_value(json_object_get(val, "method")); - - if (ret < 0 && method) { - if (!safecmp(method, "mining.submit")) - ret = SM_SHARE; - else if (!safecmp(method, "mining.notify")) - ret = SM_UPDATE; - else if (!safecmp(method, "mining.subscribe")) - ret = SM_SUBSCRIBE; - else if (cmdmatch(method, "mining.auth")) - ret = SM_AUTH; - else if (cmdmatch(method, "mining.get")) - ret = SM_TXNS; - else if (cmdmatch(method, "mining.suggest_difficulty")) - ret = SM_SUGGESTDIFF; - else - ret = SM_NONE; - } -out: - return ret; -} - -static user_instance_t *generate_remote_user(ckpool_t *ckp, const char *workername) -{ - char *base_username = strdupa(workername), *username; - sdata_t *sdata = ckp->sdata; - bool new_user = false; - user_instance_t *user; - int len; - - username = strsep(&base_username, "._"); - if (!username || !strlen(username)) - username = base_username; - len = strlen(username); - if (unlikely(len > 127)) - username[127] = '\0'; - - user = get_create_user(sdata, username, &new_user); - - if (!ckp->proxy && (new_user || !user->btcaddress)) { - /* Is this a btc address based username? */ - if (generator_checkaddr(ckp, username, &user->script, &user->segwit)) { - user->btcaddress = true; - user->txnlen = address_to_txn(user->txnbin, username, user->script, user->segwit); - } - } - if (new_user) { - LOGNOTICE("Added new remote user %s%s", username, user->btcaddress ? 
- " as address based registration" : ""); - } - - return user; -} - -static void parse_remote_share(ckpool_t *ckp, sdata_t *sdata, json_t *val, const char *buf) -{ - json_t *workername_val = json_object_get(val, "workername"); - worker_instance_t *worker; - const char *workername; - double diff, sdiff = 0; - user_instance_t *user; - tv_t now_t; - - workername = json_string_value(workername_val); - if (unlikely(!workername_val || !workername)) { - LOGWARNING("Failed to get workername from remote message %s", buf); - return; - } - if (unlikely(!json_get_double(&diff, val, "diff") || diff < 0.000001)) { - LOGWARNING("Unable to parse valid diff from remote message %s", buf); - return; - } - json_get_double(&sdiff, val, "sdiff"); - user = generate_remote_user(ckp, workername); - user->authorised = true; - worker = get_worker(sdata, user, workername); - check_best_diff(sdata, user, worker, sdiff, NULL); - - mutex_lock(&sdata->uastats_lock); - sdata->stats.unaccounted_shares++; - sdata->stats.unaccounted_diff_shares += diff; - mutex_unlock(&sdata->uastats_lock); - - worker->shares += diff; - user->shares += diff; - tv_time(&now_t); - - decay_worker(worker, diff, &now_t); - copy_tv(&worker->last_share, &now_t); - worker->idle = false; - - decay_user(user, diff, &now_t); - copy_tv(&user->last_share, &now_t); - - LOGINFO("Added %.0lf remote shares to worker %s", diff, workername); -} - -static void parse_remote_shareerr(ckpool_t *ckp, json_t *val, const char *buf) -{ - const char *workername; - - workername = json_string_value(json_object_get(val, "workername")); - if (unlikely(!workername)) { - LOGWARNING("Failed to find workername in parse_remote_shareerr %s", buf); - return; - } - /* Return value ignored */ - generate_remote_user(ckp, workername); -} - -static void send_auth_response(sdata_t *sdata, const int64_t client_id, const bool ret, - json_t *id_val, json_t *err_val) -{ - json_t *json_msg = json_object(); - - json_object_set_new_nocheck(json_msg, "result", 
json_boolean(ret)); - json_object_set_new_nocheck(json_msg, "error", err_val ? err_val : json_null()); - json_object_set(json_msg, "id", id_val); - stratum_add_send(sdata, json_msg, client_id, SM_AUTHRESULT); -} - -static void send_auth_success(ckpool_t *ckp, sdata_t *sdata, stratum_instance_t *client) -{ - char *buf; - - ASPRINTF(&buf, "Authorised, welcome to %s %s!", ckp->name, - client->user_instance->username); - stratum_send_message(sdata, client, buf); - free(buf); -} - -static void send_auth_failure(sdata_t *sdata, stratum_instance_t *client) -{ - stratum_send_message(sdata, client, "Failed authorisation :("); -} - -/* For finding a client by its virtualid instead of client->id. This is an - * inefficient lookup but only occurs once on parsing a remote auth from the - * upstream pool on passthrough subclients. */ -static stratum_instance_t *ref_instance_by_virtualid(sdata_t *sdata, int64_t *client_id) -{ - stratum_instance_t *client, *ret = NULL; - - ck_wlock(&sdata->instance_lock); - for (client = sdata->stratum_instances; client; client = client->hh.next) { - if (likely(client->virtualid != *client_id)) - continue; - if (likely(!client->dropped)) { - ret = client; - __inc_instance_ref(ret); - /* Replace the client_id with the correct one, allowing - * us to send the response to the correct client */ - *client_id = client->id; - } - break; - } - ck_wunlock(&sdata->instance_lock); - - return ret; -} - -void parse_upstream_auth(ckpool_t *ckp, json_t *val) -{ - json_t *id_val = NULL, *err_val = NULL; - sdata_t *sdata = ckp->sdata; - stratum_instance_t *client; - bool ret, warn = false; - int64_t client_id; - - id_val = json_object_get(val, "id"); - if (unlikely(!id_val)) - goto out; - if (unlikely(!json_get_int64(&client_id, val, "client_id"))) - goto out; - if (unlikely(!json_get_bool(&ret, val, "result"))) - goto out; - err_val = json_object_get(val, "error"); - client = ref_instance_by_id(sdata, client_id); - /* Is this client_id a virtualid from a 
passthrough subclient */ - if (!client) - client = ref_instance_by_virtualid(sdata, &client_id); - if (!client) { - LOGINFO("Failed to find client id %"PRId64" in parse_upstream_auth", - client_id); - goto out; - } - if (ret) - send_auth_success(ckp, sdata, client); - else - send_auth_failure(sdata, client); - send_auth_response(sdata, client_id, ret, id_val, err_val); - client_auth(ckp, client, client->user_instance, ret); - dec_instance_ref(sdata, client); -out: - if (unlikely(warn)) { - char *s = json_dumps(val, 0); - - LOGWARNING("Failed to get valid upstream result in parse_upstream_auth %s", s); - free(s); - } -} - -void parse_upstream_workinfo(ckpool_t *ckp, json_t *val) -{ - add_node_base(ckp, val, true, 0); -} - -#define parse_remote_workinfo(ckp, val, client_id) add_node_base(ckp, val, true, client_id) - -static void parse_remote_auth(ckpool_t *ckp, sdata_t *sdata, json_t *val, stratum_instance_t *remote, - const int64_t remote_id) -{ - json_t *params, *method, *id_val; - stratum_instance_t *client; - json_params_t *jp; - int64_t client_id; - - if (ckp->btcsolo) { - LOGWARNING("Got remote auth request in btcsolo mode, ignoring!"); - return; - } - json_get_int64(&client_id, val, "clientid"); - /* Encode remote server client_id into remote client's id */ - client_id = (remote_id << 32) | (client_id & 0xffffffffll); - id_val = json_object_get(val, "id"); - method = json_object_get(val, "method"); - params = json_object_get(val, "params"); - jp = create_json_params(client_id, method, params, id_val); - - /* This is almost certainly the first time we'll see this client_id so - * create a new stratum instance temporarily just for auth with a plan - * to drop the client id locally once we finish with it */ - ck_wlock(&sdata->instance_lock); - client = __instance_by_id(sdata, client_id); - if (likely(!client)) - client = __stratum_add_instance(ckp, client_id, remote->address, remote->server); - client->remote = true; - json_strdup(&client->useragent, val, 
"useragent"); - json_strcpy(client->enonce1, val, "enonce1"); - json_strcpy(client->address, val, "address"); - ck_wunlock(&sdata->instance_lock); - - ckmsgq_add(sdata->sauthq, jp); -} - -/* Get the remote worker count once per minute from all the remote servers */ -static void parse_remote_workers(sdata_t *sdata, const json_t *val, const char *buf) -{ - json_t *username_val = json_object_get(val, "username"); - user_instance_t *user; - const char *username; - int workers; - - username = json_string_value(username_val); - if (unlikely(!username_val || !username)) { - LOGWARNING("Failed to get username from remote message %s", buf); - return; - } - user = get_user(sdata, username); - if (unlikely(!json_get_int(&workers, val, "workers"))) { - LOGWARNING("Failed to get workers from remote message %s", buf); - return; - } - user->remote_workers += workers; - LOGDEBUG("Adding %d remote workers to user %s", workers, username); -} - -/* Attempt to submit a remote block locally by recreating it from its workinfo */ -static void parse_remote_block(ckpool_t *ckp, sdata_t *sdata, json_t *val, const char *buf, - const int64_t client_id) -{ - json_t *workername_val = json_object_get(val, "workername"), - *name_val = json_object_get(val, "name"), *res; - const char *workername, *name, *coinbasehex, *swaphex, *cnfrm; - workbase_t *wb = NULL; - double diff = 0; - int height = 0; - int64_t id = 0; - char *msg; - int cblen; - - name = json_string_value(name_val); - if (!name_val || !name) - goto out_add; - - /* If this is the confirm block message don't try to resubmit it */ - cnfrm = json_string_value(json_object_get(val, "confirmed")); - if (cnfrm && cnfrm[0] == '1') - goto out_add; - - json_get_int64(&id, val, "workinfoid"); - coinbasehex = json_string_value(json_object_get(val, "coinbasehex")); - swaphex = json_string_value(json_object_get(val, "swaphex")); - json_get_int(&cblen, val, "cblen"); - json_get_double(&diff, val, "diff"); - - if (likely(id && coinbasehex && swaphex && 
cblen)) - wb = get_remote_workbase(sdata, id, client_id); - - if (unlikely(!wb)) - LOGWARNING("Inadequate data locally to attempt submit of remote block"); - else { - uchar swap[80], hash[32], hash1[32], flip32[32]; - char *coinbase = alloca(cblen), *gbt_block; - char blockhash[68]; - - LOGWARNING("Possible remote block solve diff %lf !", diff); - hex2bin(coinbase, coinbasehex, cblen); - hex2bin(swap, swaphex, 80); - sha256(swap, 80, hash1); - sha256(hash1, 32, hash); - gbt_block = process_block(wb, coinbase, cblen, swap, hash, flip32, blockhash); - /* Note nodes use jobid of the mapped_id instead of workinfoid */ - json_set_int64(val, "jobid", wb->mapped_id); - send_nodes_block(sdata, val, client_id); - /* We rely on the remote server to give us the ID_BLOCK - * responses, so only use this response to determine if we - * should reset the best shares. */ - if (local_block_submit(ckp, gbt_block, flip32, wb->height)) { - block_share_summary(sdata); - reset_bestshares(sdata); - } - put_remote_workbase(sdata, wb); - } - - workername = json_string_value(workername_val); - if (unlikely(!workername_val || !workername)) { - LOGWARNING("Failed to get workername from remote message %s", buf); - workername = ""; - } - if (unlikely(!json_get_int(&height, val, "height"))) - LOGWARNING("Failed to get height from remote message %s", buf); - ASPRINTF(&msg, "Block %d solved by %s @ %s!", height, workername, name); - LOGWARNING("%s", msg); - stratum_broadcast_message(sdata, msg); - free(msg); -out_add: - /* Make a duplicate for use downstream */ - res = json_deep_copy(val); - remap_workinfo_id(sdata, res, client_id); - if (!ckp->remote) - downstream_json(sdata, res, client_id, SSEND_PREPEND); - - json_decref(res); -} - -void parse_upstream_block(ckpool_t *ckp, json_t *val) -{ - char *buf; - sdata_t *sdata = ckp->sdata; - - buf = json_dumps(val, 0); - parse_remote_block(ckp, sdata, val, buf, 0); - free(buf); -} - -static void send_remote_pong(sdata_t *sdata, stratum_instance_t 
*client) -{ - json_t *json_msg; - - JSON_CPACK(json_msg, "{ss}", "method", "pong"); - stratum_add_send(sdata, json_msg, client->id, SM_PONG); -} - -static void add_node_txns(ckpool_t *ckp, sdata_t *sdata, const json_t *val) -{ - json_t *txn_array, *txn_val, *data_val, *hash_val; - txntable_t *txns = NULL; - int i, arr_size; - int added = 0; - - txn_array = json_object_get(val, "transaction"); - arr_size = json_array_size(txn_array); - - for (i = 0; i < arr_size; i++) { - const char *hash, *data; - - txn_val = json_array_get(txn_array, i); - data_val = json_object_get(txn_val, "data"); - hash_val = json_object_get(txn_val, "hash"); - data = json_string_value(data_val); - hash = json_string_value(hash_val); - if (unlikely(!data || !hash)) { - LOGERR("Failed to get hash/data in add_node_txns"); - continue; - } - - if (add_txn(ckp, sdata, &txns, hash, data, false)) - added++; - } - - if (added) - update_txns(ckp, sdata, txns, false); -} - -void parse_remote_txns(ckpool_t *ckp, const json_t *val) -{ - add_node_txns(ckp, ckp->sdata, val); -} - -static json_t *get_hash_transactions(sdata_t *sdata, const json_t *hashes) -{ - json_t *txn_array = json_array(), *arr_val; - int found = 0; - size_t index; - - ck_rlock(&sdata->txn_lock); - json_array_foreach(hashes, index, arr_val) { - const char *hash = json_string_value(arr_val); - json_t *txn_val; - txntable_t *txn; - - HASH_FIND_STR(sdata->txns, hash, txn); - if (!txn) - continue; - JSON_CPACK(txn_val, "{ss,ss}", - "hash", hash, "data", txn->data); - json_array_append_new(txn_array, txn_val); - found++; - } - ck_runlock(&sdata->txn_lock); - - return txn_array; -} - -static json_t *get_reqtxns(sdata_t *sdata, const json_t *val, bool downstream) -{ - json_t *hashes = json_object_get(val, "hash"); - json_t *txns, *ret = NULL; - int requested, found; - - if (unlikely(!hashes) || !json_is_array(hashes)) - goto out; - requested = json_array_size(hashes); - if (unlikely(!requested)) - goto out; - - txns = 
get_hash_transactions(sdata, hashes); - found = json_array_size(txns); - if (found) { - JSON_CPACK(ret, "{ssso}", "method", stratum_msgs[SM_TRANSACTIONS], "transaction", txns); - LOGINFO("Sending %d found of %d requested txns %s", found, requested, - downstream ? "downstream" : "upstream"); - } else - json_decref(txns); -out: - return ret; -} - -static void parse_remote_reqtxns(sdata_t *sdata, const json_t *val, const int64_t client_id) -{ - json_t *ret = get_reqtxns(sdata, val, true); - - if (!ret) - return; - stratum_add_send(sdata, ret, client_id, SM_TRANSACTIONS); -} - -void parse_upstream_reqtxns(ckpool_t *ckp, json_t *val) -{ - json_t *ret = get_reqtxns(ckp->sdata, val, false); - char *msg; - - if (!ret) - return; - msg = json_dumps(ret, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); - json_decref(ret); - connector_upstream_msg(ckp, msg); -} - -static void parse_trusted_msg(ckpool_t *ckp, sdata_t *sdata, json_t *val, stratum_instance_t *client) -{ - json_t *method_val = json_object_get(val, "method"); - char *buf = json_dumps(val, 0); - const char *method; - - LOGDEBUG("Got remote message %s", buf); - method = json_string_value(method_val); - if (unlikely(!method_val || !method)) { - LOGWARNING("Failed to get method from remote message %s", buf); - goto out; - } - - if (likely(!safecmp(method, stratum_msgs[SM_SHARE]))) - parse_remote_share(ckp, sdata, val, buf); - else if (!safecmp(method, stratum_msgs[SM_TRANSACTIONS])) - add_node_txns(ckp, sdata, val); - else if (!safecmp(method, stratum_msgs[SM_WORKINFO])) - parse_remote_workinfo(ckp, val, client->id); - else if (!safecmp(method, stratum_msgs[SM_AUTH])) - parse_remote_auth(ckp, sdata, val, client, client->id); - else if (!safecmp(method, stratum_msgs[SM_SHAREERR])) - parse_remote_shareerr(ckp, val, buf); - else if (!safecmp(method, stratum_msgs[SM_BLOCK])) - parse_remote_block(ckp, sdata, val, buf, client->id); - else if (!safecmp(method, stratum_msgs[SM_REQTXNS])) - 
parse_remote_reqtxns(sdata, val, client->id); - else if (!safecmp(method, "workers")) - parse_remote_workers(sdata, val, buf); - else if (!safecmp(method, "ping")) - send_remote_pong(sdata, client); - else - LOGWARNING("unrecognised trusted message %s", buf); -out: - free(buf); -} - -/* Entered with client holding ref count */ -static void node_client_msg(ckpool_t *ckp, json_t *val, stratum_instance_t *client) -{ - json_t *params, *method, *res_val, *id_val, *err_val = NULL; - int msg_type = node_msg_type(val); - sdata_t *sdata = ckp->sdata; - json_params_t *jp; - char *buf = NULL; - - if (msg_type < 0) { - buf = json_dumps(val, 0); - LOGERR("Missing client %s node method from %s", client->identity, buf); - goto out; - } - LOGDEBUG("Got client %s node method %d:%s", client->identity, msg_type, stratum_msgs[msg_type]); - id_val = json_object_get(val, "id"); - method = json_object_get(val, "method"); - params = json_object_get(val, "params"); - res_val = json_object_get(val, "result"); - switch (msg_type) { - case SM_SHARE: - jp = create_json_params(client->id, method, params, id_val); - ckmsgq_add(sdata->sshareq, jp); - break; - case SM_SHARERESULT: - parse_share_result(ckp, client, res_val); - break; - case SM_DIFF: - parse_diff(client, params); - break; - case SM_SUBSCRIBE: - parse_subscribe(client, client->id, params); - break; - case SM_SUBSCRIBERESULT: - parse_subscribe_result(client, res_val); - break; - case SM_AUTH: - parse_authorise(client, params, &err_val); - break; - case SM_AUTHRESULT: - parse_authorise_result(ckp, sdata, client, res_val); - break; - case SM_NONE: - buf = json_dumps(val, 0); - LOGNOTICE("Unrecognised method from client %s :%s", - client->identity, buf); - break; - default: - break; - } -out: - free(buf); -} - -static void parse_node_msg(ckpool_t *ckp, sdata_t *sdata, json_t *val) -{ - int msg_type = node_msg_type(val); - - if (msg_type < 0) { - char *buf = json_dumps(val, 0); - - LOGERR("Missing node method from %s", buf); - free(buf); 
- return; - } - LOGDEBUG("Got node method %d:%s", msg_type, stratum_msgs[msg_type]); - switch (msg_type) { - case SM_TRANSACTIONS: - add_node_txns(ckp, sdata, val); - break; - case SM_WORKINFO: - add_node_base(ckp, val, false, 0); - break; - case SM_BLOCK: - submit_node_block(ckp, sdata, val); - break; - default: - break; - } -} - -/* Entered with client holding ref count */ -static void parse_instance_msg(ckpool_t *ckp, sdata_t *sdata, smsg_t *msg, stratum_instance_t *client) -{ - json_t *val = msg->json_msg, *id_val, *method, *params; - int64_t client_id = msg->client_id; - int delays = 0; - - if (client->reject == 3) { - LOGINFO("Dropping client %s %s tagged for lazy invalidation", - client->identity, client->address); - connector_drop_client(ckp, client_id); - return; - } - - /* Return back the same id_val even if it's null or not existent. */ - id_val = json_object_get(val, "id"); - - method = json_object_get(val, "method"); - if (unlikely(!method)) { - json_t *res_val = json_object_get(val, "result"); - - /* Is this a spurious result or ping response? */ - if (res_val) { - const char *result = json_string_value(res_val); - - if (!safecmp(result, "pong")) - LOGDEBUG("Received pong from client %s", client->identity); - else - LOGDEBUG("Received spurious response %s from client %s", - result ? result : "", client->identity); - return; - } - send_json_err(sdata, client_id, id_val, "-3:method not found"); - return; - } - if (unlikely(!json_is_string(method))) { - send_json_err(sdata, client_id, id_val, "-1:method is not string"); - return; - } - params = json_object_get(val, "params"); - if (unlikely(!params)) { - send_json_err(sdata, client_id, id_val, "-1:params not found"); - return; - } - /* At startup we block until there's a current workbase otherwise we - * will reject miners with the initialising message. A slightly delayed - * response to subscribe is better tolerated. 
*/ - while (unlikely(!ckp->proxy && !sdata->current_workbase)) { - cksleep_ms(100); - if (!(++delays % 50)) - LOGWARNING("%d Second delay waiting for bitcoind at startup", delays / 10); - } - parse_method(ckp, sdata, client, client_id, id_val, method, params); -} - -static void srecv_process(ckpool_t *ckp, json_t *val) -{ - char address[INET6_ADDRSTRLEN], *buf = NULL; - bool noid = false, dropped = false; - sdata_t *sdata = ckp->sdata; - stratum_instance_t *client; - smsg_t *msg; - int server; - - if (unlikely(!val)) { - LOGWARNING("srecv_process received NULL val!"); - return; - } - - msg = ckzalloc(sizeof(smsg_t)); - msg->json_msg = val; - val = json_object_get(msg->json_msg, "client_id"); - if (unlikely(!val)) { - if (ckp->node) - parse_node_msg(ckp, sdata, msg->json_msg); - else { - buf = json_dumps(val, JSON_COMPACT); - LOGWARNING("Failed to extract client_id from connector json smsg %s", buf); - } - goto out; - } - - msg->client_id = json_integer_value(val); - json_object_clear(val); - - val = json_object_get(msg->json_msg, "address"); - if (unlikely(!val)) { - buf = json_dumps(val, JSON_COMPACT); - LOGWARNING("Failed to extract address from connector json smsg %s", buf); - goto out; - } - strcpy(address, json_string_value(val)); - json_object_clear(val); - - val = json_object_get(msg->json_msg, "server"); - if (unlikely(!val)) { - buf = json_dumps(val, JSON_COMPACT); - LOGWARNING("Failed to extract server from connector json smsg %s", buf); - goto out; - } - server = json_integer_value(val); - json_object_clear(val); - - /* Parse the message here */ - ck_wlock(&sdata->instance_lock); - client = __instance_by_id(sdata, msg->client_id); - /* If client_id instance doesn't exist yet, create one */ - if (unlikely(!client)) { - noid = true; - client = __stratum_add_instance(ckp, msg->client_id, address, server); - } else if (unlikely(client->dropped)) - dropped = true; - if (likely(!dropped)) - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - - 
if (unlikely(dropped)) { - /* Client may be NULL here */ - LOGNOTICE("Stratifier skipped dropped instance %"PRId64" message from server %d", - msg->client_id, server); - connector_drop_client(ckp, msg->client_id); - goto out; - } - if (unlikely(noid)) - LOGINFO("Stratifier added instance %s server %d", client->identity, server); - - if (client->trusted) - parse_trusted_msg(ckp, sdata, msg->json_msg, client); - else if (ckp->node) - node_client_msg(ckp, msg->json_msg, client); - else - parse_instance_msg(ckp, sdata, msg, client); - dec_instance_ref(sdata, client); -out: - free_smsg(msg); - free(buf); -} - -void _stratifier_add_recv(ckpool_t *ckp, json_t *val, const char *file, const char *func, const int line) -{ - sdata_t *sdata; - - if (unlikely(!val)) { - LOGWARNING("_stratifier_add_recv received NULL val from %s %s:%d", file, func, line); - return; - } - sdata = ckp->sdata; - ckmsgq_add(sdata->srecvs, val); -} - -static void ssend_process(ckpool_t *ckp, smsg_t *msg) -{ - if (unlikely(!msg->json_msg)) { - LOGERR("Sent null json msg to stratum_sender"); - free(msg); - return; - } - - /* Add client_id to the json message and send it to the - * connector process to be delivered */ - json_object_set_new_nocheck(msg->json_msg, "client_id", json_integer(msg->client_id)); - connector_add_message(ckp, msg->json_msg); - /* The connector will free msg->json_msg */ - free(msg); -} - -static void discard_json_params(json_params_t *jp) -{ - json_decref(jp->method); - json_decref(jp->params); - if (jp->id_val) - json_decref(jp->id_val); - free(jp); -} - -static void steal_json_id(json_t *val, json_params_t *jp) -{ - /* Steal the id_val as is to avoid a copy */ - json_object_set_new_nocheck(val, "id", jp->id_val); - jp->id_val = NULL; -} - -static void sshare_process(ckpool_t *ckp, json_params_t *jp) -{ - json_t *result_val, *json_msg, *err_val = NULL; - stratum_instance_t *client; - sdata_t *sdata = ckp->sdata; - int64_t client_id; - - client_id = jp->client_id; - - client = 
ref_instance_by_id(sdata, client_id); - if (unlikely(!client)) { - LOGINFO("Share processor failed to find client id %"PRId64" in hashtable!", client_id); - goto out; - } - if (unlikely(!client->authorised)) { - LOGDEBUG("Client %s no longer authorised to submit shares", client->identity); - goto out_decref; - } - json_msg = json_object(); - result_val = parse_submit(client, json_msg, jp->params, &err_val); - json_object_set_new_nocheck(json_msg, "result", result_val); - json_object_set_new_nocheck(json_msg, "error", err_val ? err_val : json_null()); - steal_json_id(json_msg, jp); - stratum_add_send(sdata, json_msg, client_id, SM_SHARERESULT); -out_decref: - dec_instance_ref(sdata, client); -out: - discard_json_params(jp); -} - -/* As ref_instance_by_id but only returns clients not authorising or authorised, - * and sets the authorising flag */ -static stratum_instance_t *preauth_ref_instance_by_id(sdata_t *sdata, const int64_t id) -{ - stratum_instance_t *client; - - ck_wlock(&sdata->instance_lock); - client = __instance_by_id(sdata, id); - if (client) { - if (client->dropped || client->authorising || client->authorised) - client = NULL; - else { - __inc_instance_ref(client); - client->authorising = true; - } - } - ck_wunlock(&sdata->instance_lock); - - return client; -} - -/* Send the auth upstream in trusted remote mode, allowing the connector to - * asynchronously receive the response and return the auth response. */ -static void upstream_auth(ckpool_t *ckp, stratum_instance_t *client, json_params_t *jp) -{ - json_t *val = json_object(); - char cdfield[64]; - char *msg; - ts_t now; - - ts_realtime(&now); - sprintf(cdfield, "%lu,%lu", now.tv_sec, now.tv_nsec); - - json_set_object(val, "params", jp->params); - json_set_object(val, "id", jp->id_val); - json_set_object(val, "method", jp->method); - json_set_string(val, "method", stratum_msgs[SM_AUTH]); - - json_set_string(val, "useragent", client->useragent ? 
: ""); - json_set_string(val, "enonce1", client->enonce1 ? : ""); - json_set_string(val, "address", client->address); - json_set_int64(val, "clientid", client->virtualid); - msg = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT | JSON_EOL); - json_decref(val); - connector_upstream_msg(ckp, msg); -} - -static void sauth_process(ckpool_t *ckp, json_params_t *jp) -{ - json_t *result_val, *err_val = NULL; - sdata_t *sdata = ckp->sdata; - stratum_instance_t *client; - int64_t mindiff, client_id; - bool ret; - - client_id = jp->client_id; - - client = preauth_ref_instance_by_id(sdata, client_id); - if (unlikely(!client)) { - LOGINFO("Authoriser failed to find client id %"PRId64" in hashtable!", client_id); - goto out_noclient; - } - - result_val = parse_authorise(client, jp->params, &err_val); - ret = json_is_true(result_val); - if (ret) { - /* So far okay in remote mode, remainder to be done by upstream - * pool */ - if (ckp->remote && !ckp->btcsolo) { - upstream_auth(ckp, client, jp); - goto out; - } - send_auth_success(ckp, sdata, client); - } else - send_auth_failure(sdata, client); - send_auth_response(sdata, client_id, ret, jp->id_val, err_val); - if (!ret) - goto out; - - if (client->remote) { - /* We don't need to keep a record of clients on remote trusted - * servers after auth'ing them. */ - client->dropped = true; - goto out; - } - - /* Update the client now if they have set a valid mindiff different - * from the startdiff. 
suggest_diff overrides worker mindiff */ - if (client->suggest_diff) - mindiff = client->suggest_diff; - else - mindiff = client->worker_instance->mindiff; - if (mindiff) { - mindiff = MAX(ckp->mindiff, mindiff); - if (mindiff != client->diff) { - client->diff = mindiff; - stratum_send_diff(sdata, client); - } - } - -out: - dec_instance_ref(sdata, client); -out_noclient: - discard_json_params(jp); - -} - -static int transactions_by_jobid(sdata_t *sdata, const int64_t id) -{ - workbase_t *wb; - int ret = -1; - - ck_rlock(&sdata->workbase_lock); - HASH_FIND_I64(sdata->workbases, &id, wb); - if (wb) - ret = wb->txns; - ck_runlock(&sdata->workbase_lock); - - return ret; -} - -static json_t *txnhashes_by_jobid(sdata_t *sdata, const int64_t id) -{ - json_t *ret = NULL; - workbase_t *wb; - - ck_rlock(&sdata->workbase_lock); - HASH_FIND_I64(sdata->workbases, &id, wb); - if (wb) - ret = json_string(wb->txn_hashes); - ck_runlock(&sdata->workbase_lock); - - return ret; -} - -static void send_transactions(ckpool_t *ckp, json_params_t *jp) -{ - const char *msg = json_string_value(jp->method), - *params = json_string_value(json_array_get(jp->params, 0)); - stratum_instance_t *client = NULL; - sdata_t *sdata = ckp->sdata; - json_t *val, *hashes; - int64_t job_id = 0; - time_t now_t; - - if (unlikely(!msg || !strlen(msg))) { - LOGWARNING("send_transactions received null method"); - goto out; - } - val = json_object(); - steal_json_id(val, jp); - if (cmdmatch(msg, "mining.get_transactions")) { - int txns; - - /* We don't actually send the transactions as that would use - * up huge bandwidth, so we just return the number of - * transactions :) . Support both forms of encoding the - * request in method name and as a parameter. 
*/ - if (params && strlen(params) > 0) - sscanf(params, "%lx", &job_id); - else - sscanf(msg, "mining.get_transactions(%lx", &job_id); - txns = transactions_by_jobid(sdata, job_id); - if (txns != -1) { - json_set_int(val, "result", txns); - json_object_set_new_nocheck(val, "error", json_null()); - } else - json_set_string(val, "error", "Invalid job_id"); - goto out_send; - } - if (!cmdmatch(msg, "mining.get_txnhashes")) { - LOGDEBUG("Unhandled mining get request: %s", msg); - json_set_string(val, "error", "Unhandled"); - goto out_send; - } - - client = ref_instance_by_id(sdata, jp->client_id); - if (unlikely(!client)) { - LOGINFO("send_transactions failed to find client id %"PRId64" in hashtable!", - jp->client_id); - goto out; - } - - now_t = time(NULL); - if (now_t - client->last_txns < ckp->update_interval) { - LOGNOTICE("Rate limiting get_txnhashes on client %"PRId64"!", jp->client_id); - json_set_string(val, "error", "Ratelimit"); - goto out_send; - } - client->last_txns = now_t; - if (!params || !strlen(params)) { - json_set_string(val, "error", "Invalid params"); - goto out_send; - } - sscanf(params, "%lx", &job_id); - hashes = txnhashes_by_jobid(sdata, job_id); - if (hashes) { - json_object_set_new_nocheck(val, "result", hashes); - json_object_set_new_nocheck(val, "error", json_null()); - } else - json_set_string(val, "error", "Invalid job_id"); -out_send: - stratum_add_send(sdata, val, jp->client_id, SM_TXNSRESULT); -out: - if (client) - dec_instance_ref(sdata, client); - discard_json_params(jp); -} - -static void add_log_entry(log_entry_t **entries, char **fname, char **buf) -{ - log_entry_t *entry = ckalloc(sizeof(log_entry_t)); - - entry->fname = *fname; - *fname = NULL; - entry->buf = *buf; - *buf = NULL; - DL_APPEND(*entries, entry); -} - -static void dump_log_entries(log_entry_t **entries) -{ - log_entry_t *entry, *tmpentry; - FILE *fp; - - DL_FOREACH_SAFE(*entries, entry, tmpentry) { - DL_DELETE(*entries, entry); - fp = fopen(entry->fname, "we"); - 
if (likely(fp)) { - fprintf(fp, "%s", entry->buf); - fclose(fp); - } else - LOGERR("Failed to fopen %s in dump_log_entries", entry->fname); - free(entry->fname); - free(entry->buf); - free(entry); - } -} - -static void upstream_workers(ckpool_t *ckp, user_instance_t *user) -{ - char *msg; - - ASPRINTF(&msg, "{\"method\":\"workers\",\"username\":\"%s\",\"workers\":%d}\n", - user->username, user->workers); - connector_upstream_msg(ckp, msg); -} - - -/* To iterate over all users, if user is initially NULL, this will return the first entry, - * otherwise it will return the entry after user, and NULL if there are no more entries. - * Allows us to grab and drop the lock on each iteration. */ -static user_instance_t *next_user(sdata_t *sdata, user_instance_t *user) -{ - ck_rlock(&sdata->instance_lock); - if (unlikely(!user)) - user = sdata->user_instances; - else - user = user->hh.next; - ck_runlock(&sdata->instance_lock); - - return user; -} - -/* Ditto for worker */ -static worker_instance_t *next_worker(sdata_t *sdata, user_instance_t *user, worker_instance_t *worker) -{ - ck_rlock(&sdata->instance_lock); - if (!worker) - worker = user->worker_instances; - else - worker = worker->next; - ck_runlock(&sdata->instance_lock); - - return worker; -} - -static void *statsupdate(void *arg) -{ - ckpool_t *ckp = (ckpool_t *)arg; - sdata_t *sdata = ckp->sdata; - pool_stats_t *stats = &sdata->stats; - - pthread_detach(pthread_self()); - rename_proc("statsupdate"); - - tv_time(&stats->start_time); - cksleep_prepare_r(&stats->last_update); - sleep(1); - - while (42) { - double ghs, ghs1, ghs5, ghs15, ghs60, ghs360, ghs1440, ghs10080, - per_tdiff, percent; - char suffix1[16], suffix5[16], suffix15[16], suffix60[16], cdfield[64]; - char suffix360[16], suffix1440[16], suffix10080[16]; - int remote_users = 0, remote_workers = 0, idle_workers = 0; - log_entry_t *log_entries = NULL; - char_entry_t *char_list = NULL; - stratum_instance_t *client; - user_instance_t *user; - char *fname, *s, 
*sp; - tv_t now, diff; - ts_t ts_now; - json_t *val; - FILE *fp; - int i; - - tv_time(&now); - timersub(&now, &stats->start_time, &diff); - - ck_wlock(&sdata->instance_lock); - /* Grab the first entry */ - client = sdata->stratum_instances; - if (likely(client)) - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - - while (client) { - tv_time(&now); - /* Look for clients that may have been dropped which the - * stratifier has not been informed about and ask the - * connector if they still exist */ - if (client->dropped) - connector_test_client(ckp, client->id); - else if (remote_server(client)) { - /* Do nothing to these */ - } else if (!client->authorised) { - /* Test for clients that haven't authed in over a minute - * and drop them lazily */ - if (now.tv_sec > client->start_time + 60) { - client->dropped = true; - connector_drop_client(ckp, client->id); - } - } else { - per_tdiff = tvdiff(&now, &client->last_share); - /* Decay times per connected instance */ - if (per_tdiff > 60) { - /* No shares for over a minute, decay to 0 */ - decay_client(client, 0, &now); - idle_workers++; - if (per_tdiff > 600) - client->idle = true; - /* Test idle clients are still connected */ - connector_test_client(ckp, client->id); - } - } - - ck_wlock(&sdata->instance_lock); - /* Drop the reference of the last entry we examined, - * then grab the next client. 
*/ - __dec_instance_ref(client); - client = client->hh.next; - /* Grab a reference to this client allowing us to examine - * it without holding the lock */ - if (likely(client)) - __inc_instance_ref(client); - ck_wunlock(&sdata->instance_lock); - } - - user = NULL; - - while ((user = next_user(sdata, user)) != NULL) { - worker_instance_t *worker; - json_t *user_array; - - if (!user->authorised) - continue; - - tv_time(&now); - - /* Decay times per user */ - per_tdiff = tvdiff(&now, &user->last_share); - /* Drop storage of users with no shares */ - if (!user->last_share.tv_sec) { - LOGDEBUG("Skipping inactive user %s", user->username); - continue; - } - if (per_tdiff > 60) - decay_user(user, 0, &now); - - ghs = user->dsps1440 * nonces; - suffix_string(ghs, suffix1440, 16, 0); - - ghs = user->dsps1 * nonces; - suffix_string(ghs, suffix1, 16, 0); - - ghs = user->dsps5 * nonces; - suffix_string(ghs, suffix5, 16, 0); - - ghs = user->dsps60 * nonces; - suffix_string(ghs, suffix60, 16, 0); - - ghs = user->dsps10080 * nonces; - suffix_string(ghs, suffix10080, 16, 0); - - JSON_CPACK(val, "{ss,ss,ss,ss,ss,si,si,sI,sf,sI, sI}", - "hashrate1m", suffix1, - "hashrate5m", suffix5, - "hashrate1hr", suffix60, - "hashrate1d", suffix1440, - "hashrate7d", suffix10080, - "lastshare", user->last_share.tv_sec, - "workers", user->workers + user->remote_workers, - "shares", user->shares, - "bestshare", user->best_diff, - "bestever", user->best_ever, - "authorised", user->auth_time); - - if (user->remote_workers) { - remote_workers += user->remote_workers; - /* Reset the remote_workers count once per minute */ - user->remote_workers = 0; - /* We check this unlocked but transiently - * wrong is harmless */ - if (!user->workers) - remote_users++; - } - - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_COMPACT); - ASPRINTF(&sp, "User %s:%s", user->username, s); - dealloc(s); - add_msg_entry(&char_list, &sp); - - user_array = json_array(); - worker = NULL; - - /* Decay times per 
worker */ - while ((worker = next_worker(sdata, user, worker)) != NULL) { - json_t *wval; - - per_tdiff = tvdiff(&now, &worker->last_share); - if (per_tdiff > 60) { - decay_worker(worker, 0, &now); - worker->idle = true; - /* Drop storage of workers idle for 1 week */ - if (per_tdiff > 600000) { - LOGDEBUG("Skipping inactive worker %s", worker->workername); - continue; - } - } - - ghs = worker->dsps1440 * nonces; - suffix_string(ghs, suffix1440, 16, 0); - - ghs = worker->dsps1 * nonces; - suffix_string(ghs, suffix1, 16, 0); - - ghs = worker->dsps5 * nonces; - suffix_string(ghs, suffix5, 16, 0); - - ghs = worker->dsps60 * nonces; - suffix_string(ghs, suffix60, 16, 0); - - ghs = worker->dsps10080 * nonces; - suffix_string(ghs, suffix10080, 16, 0); - - LOGDEBUG("Storing worker %s", worker->workername); - - JSON_CPACK(wval, "{ss,ss,ss,ss,ss,ss,si,sI,sf,sI}", - "workername", worker->workername, - "hashrate1m", suffix1, - "hashrate5m", suffix5, - "hashrate1hr", suffix60, - "hashrate1d", suffix1440, - "hashrate7d", suffix10080, - "lastshare", worker->last_share.tv_sec, - "shares", worker->shares, - "bestshare", worker->best_diff, - "bestever", worker->best_ever); - json_array_append_new(user_array, wval); - } - - json_object_set_new_nocheck(val, "worker", user_array); - ASPRINTF(&fname, "%s/users/%s", ckp->logdir, user->username); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_EOL | - JSON_REAL_PRECISION(16) | JSON_INDENT(1)); - add_log_entry(&log_entries, &fname, &s); - json_decref(val); - if (ckp->remote) - upstream_workers(ckp, user); - } - - if (remote_workers) { - mutex_lock(&sdata->stats_lock); - stats->remote_workers = remote_workers; - stats->remote_users = remote_users; - mutex_unlock(&sdata->stats_lock); - } - - /* Dump log entries out of instance_lock */ - dump_log_entries(&log_entries); - notice_msg_entries(&char_list); - - ghs1 = stats->dsps1 * nonces; - suffix_string(ghs1, suffix1, 16, 0); - - ghs5 = stats->dsps5 * nonces; - 
suffix_string(ghs5, suffix5, 16, 0); - - ghs15 = stats->dsps15 * nonces; - suffix_string(ghs15, suffix15, 16, 0); - - ghs60 = stats->dsps60 * nonces; - suffix_string(ghs60, suffix60, 16, 0); - - ghs360 = stats->dsps360 * nonces; - suffix_string(ghs360, suffix360, 16, 0); - - ghs1440 = stats->dsps1440 * nonces; - suffix_string(ghs1440, suffix1440, 16, 0); - - ghs10080 = stats->dsps10080 * nonces; - suffix_string(ghs10080, suffix10080, 16, 0); - - ASPRINTF(&fname, "%s/pool/pool.status", ckp->logdir); - fp = fopen(fname, "we"); - if (unlikely(!fp)) - LOGERR("Failed to fopen %s", fname); - dealloc(fname); - - JSON_CPACK(val, "{si,si,si,si,si,si}", - "runtime", diff.tv_sec, - "lastupdate", now.tv_sec, - "Users", stats->users + stats->remote_users, - "Workers", stats->workers + stats->remote_workers, - "Idle", idle_workers, - "Disconnected", stats->disconnected); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - LOGNOTICE("Pool:%s", s); - fprintf(fp, "%s\n", s); - dealloc(s); - - JSON_CPACK(val, "{ss,ss,ss,ss,ss,ss,ss}", - "hashrate1m", suffix1, - "hashrate5m", suffix5, - "hashrate15m", suffix15, - "hashrate1hr", suffix60, - "hashrate6hr", suffix360, - "hashrate1d", suffix1440, - "hashrate7d", suffix10080); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - LOGNOTICE("Pool:%s", s); - fprintf(fp, "%s\n", s); - dealloc(s); - - /* Round to 4 significant digits */ - percent = round(stats->accounted_diff_shares * 10000 / stats->network_diff) / 100; - JSON_CPACK(val, "{sf,sI,sI,sI,sf,sf,sf,sf}", - "diff", percent, - "accepted", stats->accounted_diff_shares, - "rejected", stats->accounted_rejects, - "bestshare", stats->best_diff, - "SPS1m", stats->sps1, - "SPS5m", stats->sps5, - "SPS15m", stats->sps15, - "SPS1h", stats->sps60); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER | JSON_REAL_PRECISION(3)); - json_decref(val); - LOGNOTICE("Pool:%s", s); - fprintf(fp, "%s\n", s); - dealloc(s); - fclose(fp); - - if 
(ckp->proxy && sdata->proxy) { - proxy_t *proxy, *proxytmp, *subproxy, *subtmp; - - mutex_lock(&sdata->proxy_lock); - JSON_CPACK(val, "{sI,si,si}", - "current", sdata->proxy->id, - "active", HASH_COUNT(sdata->proxies), - "total", sdata->proxy_count); - mutex_unlock(&sdata->proxy_lock); - - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - LOGNOTICE("Proxy:%s", s); - dealloc(s); - - mutex_lock(&sdata->proxy_lock); - HASH_ITER(hh, sdata->proxies, proxy, proxytmp) { - JSON_CPACK(val, "{sI,si,sI,sb}", - "id", proxy->id, - "subproxies", proxy->subproxy_count, - "clients", proxy->combined_clients, - "alive", !proxy->dead); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - ASPRINTF(&sp, "Proxies:%s", s); - dealloc(s); - add_msg_entry(&char_list, &sp); - HASH_ITER(sh, proxy->subproxies, subproxy, subtmp) { - JSON_CPACK(val, "{sI,si,si,sI,sI,sf,sb}", - "id", subproxy->id, - "subid", subproxy->subid, - "nonce2len", subproxy->nonce2len, - "clients", subproxy->bound_clients, - "maxclients", subproxy->max_clients, - "diff", subproxy->diff, - "alive", !subproxy->dead); - s = json_dumps(val, JSON_NO_UTF8 | JSON_PRESERVE_ORDER); - json_decref(val); - ASPRINTF(&sp, "Subproxies:%s", s); - dealloc(s); - add_msg_entry(&char_list, &sp); - } - } - mutex_unlock(&sdata->proxy_lock); - info_msg_entries(&char_list); - } - - ts_realtime(&ts_now); - sprintf(cdfield, "%lu,%lu", ts_now.tv_sec, ts_now.tv_nsec); - JSON_CPACK(val, "{ss,si,si,si,sf,sf,sf,sf,ss,ss,ss,ss}", - "poolinstance", ckp->name, - "elapsed", diff.tv_sec, - "users", stats->users + stats->remote_users, - "workers", stats->workers + stats->remote_workers, - "hashrate", ghs1, - "hashrate5m", ghs5, - "hashrate1hr", ghs60, - "hashrate24hr", ghs1440, - "createdate", cdfield, - "createby", "code", - "createcode", __func__, - "createinet", ckp->serverurl[0]); - json_decref(val); - - /* Update stats 32 times per minute to divide up userstats, - * displaying status every minute. 
*/ - for (i = 0; i < 32; i++) { - int64_t unaccounted_shares, - unaccounted_diff_shares, - unaccounted_rejects; - - ts_to_tv(&diff, &stats->last_update); - cksleep_ms_r(&stats->last_update, 1875); - cksleep_prepare_r(&stats->last_update); - ts_to_tv(&now, &stats->last_update); - /* Calculate how long it's really been for accurate - * stats update */ - per_tdiff = tvdiff(&now, &diff); - - mutex_lock(&sdata->uastats_lock); - unaccounted_shares = stats->unaccounted_shares; - unaccounted_diff_shares = stats->unaccounted_diff_shares; - unaccounted_rejects = stats->unaccounted_rejects; - stats->unaccounted_shares = - stats->unaccounted_diff_shares = - stats->unaccounted_rejects = 0; - mutex_unlock(&sdata->uastats_lock); - - mutex_lock(&sdata->stats_lock); - stats->accounted_shares += unaccounted_shares; - stats->accounted_diff_shares += unaccounted_diff_shares; - stats->accounted_rejects += unaccounted_rejects; - - decay_time(&stats->sps1, unaccounted_shares, per_tdiff, MIN1); - decay_time(&stats->sps5, unaccounted_shares, per_tdiff, MIN5); - decay_time(&stats->sps15, unaccounted_shares, per_tdiff, MIN15); - decay_time(&stats->sps60, unaccounted_shares, per_tdiff, HOUR); - - decay_time(&stats->dsps1, unaccounted_diff_shares, per_tdiff, MIN1); - decay_time(&stats->dsps5, unaccounted_diff_shares, per_tdiff, MIN5); - decay_time(&stats->dsps15, unaccounted_diff_shares, per_tdiff, MIN15); - decay_time(&stats->dsps60, unaccounted_diff_shares, per_tdiff, HOUR); - decay_time(&stats->dsps360, unaccounted_diff_shares, per_tdiff, HOUR6); - decay_time(&stats->dsps1440, unaccounted_diff_shares, per_tdiff, DAY); - decay_time(&stats->dsps10080, unaccounted_diff_shares, per_tdiff, WEEK); - mutex_unlock(&sdata->stats_lock); - } - - /* Reset remote workers every minute since we measure it once - * every minute only. 
*/ - mutex_lock(&sdata->stats_lock); - stats->remote_workers = stats->remote_users = 0; - mutex_unlock(&sdata->stats_lock); - } - - return NULL; -} - -static void read_poolstats(ckpool_t *ckp, int *tvsec_diff) -{ - char *s = alloca(4096), *pstats, *dsps, *sps; - sdata_t *sdata = ckp->sdata; - pool_stats_t *stats = &sdata->stats; - tv_t now, last; - json_t *val; - FILE *fp; - int ret; - - snprintf(s, 4095, "%s/pool/pool.status", ckp->logdir); - fp = fopen(s, "re"); - if (!fp) { - LOGINFO("Pool does not have a logfile to read"); - return; - } - memset(s, 0, 4096); - ret = fread(s, 1, 4095, fp); - fclose(fp); - if (ret < 1 || !strlen(s)) { - LOGDEBUG("No string to read in pool logfile"); - return; - } - /* Strip out end of line terminators */ - pstats = strsep(&s, "\n"); - dsps = strsep(&s, "\n"); - sps = strsep(&s, "\n"); - if (!s) { - LOGINFO("Failed to find EOL in pool logfile"); - return; - } - val = json_loads(pstats, 0, NULL); - if (!val) { - LOGINFO("Failed to json decode pstats line from pool logfile: %s", pstats); - return; - } - tv_time(&now); - last.tv_sec = 0; - json_get_int64(&last.tv_sec, val, "lastupdate"); - json_decref(val); - LOGINFO("Successfully read pool pstats: %s", pstats); - - val = json_loads(dsps, 0, NULL); - if (!val) { - LOGINFO("Failed to json decode dsps line from pool logfile: %s", sps); - return; - } - stats->dsps1 = dsps_from_key(val, "hashrate1m"); - stats->dsps5 = dsps_from_key(val, "hashrate5m"); - stats->dsps15 = dsps_from_key(val, "hashrate15m"); - stats->dsps60 = dsps_from_key(val, "hashrate1hr"); - stats->dsps360 = dsps_from_key(val, "hashrate6hr"); - stats->dsps1440 = dsps_from_key(val, "hashrate1d"); - stats->dsps10080 = dsps_from_key(val, "hashrate7d"); - json_decref(val); - LOGINFO("Successfully read pool dsps: %s", dsps); - - val = json_loads(sps, 0, NULL); - if (!val) { - LOGINFO("Failed to json decode sps line from pool logfile: %s", dsps); - return; - } - json_get_double(&stats->sps1, val, "SPS1m"); - 
json_get_double(&stats->sps5, val, "SPS5m"); - json_get_double(&stats->sps15, val, "SPS15m"); - json_get_double(&stats->sps60, val, "SPS1h"); - json_get_int64(&stats->accounted_diff_shares, val, "accepted"); - json_get_int64(&stats->accounted_rejects, val, "rejected"); - json_get_double(&stats->best_diff, val, "bestshare"); - json_decref(val); - - LOGINFO("Successfully read pool sps: %s", sps); - if (last.tv_sec) - *tvsec_diff = now.tv_sec - last.tv_sec - 60; - if (*tvsec_diff > 60) { - LOGNOTICE("Old pool stats indicate pool down for %d seconds, decaying stats", - *tvsec_diff); - decay_time(&stats->sps1, 0, *tvsec_diff, MIN1); - decay_time(&stats->sps5, 0, *tvsec_diff, MIN5); - decay_time(&stats->sps15, 0, *tvsec_diff, MIN15); - decay_time(&stats->sps60, 0, *tvsec_diff, HOUR); - - decay_time(&stats->dsps1, 0, *tvsec_diff, MIN1); - decay_time(&stats->dsps5, 0, *tvsec_diff, MIN5); - decay_time(&stats->dsps15, 0, *tvsec_diff, MIN15); - decay_time(&stats->dsps60, 0, *tvsec_diff, HOUR); - decay_time(&stats->dsps360, 0, *tvsec_diff, HOUR6); - decay_time(&stats->dsps1440, 0, *tvsec_diff, DAY); - decay_time(&stats->dsps10080, 0, *tvsec_diff, WEEK); - } -} - -static char *status_chars = "|/-\\"; - -void *throbber(void *arg) -{ - ckpool_t *ckp = arg; - sdata_t *sdata = ckp->sdata; - int counter = 0; - - rename_proc("throbber"); - - while (42) { - double sdiff; - pool_stats_t *stats; - char stamp[128], hashrate[16], ch; - - sleep(1); - if (ckp->quiet) - continue; - sdiff = sdata->stats.accounted_diff_shares; - stats = &sdata->stats; - suffix_string(stats->dsps1 * nonces, hashrate, 16, 3); - ch = status_chars[(counter++) & 0x3]; - get_timestamp(stamp); - if (likely(sdata->current_workbase)) { - double bdiff = sdiff / sdata->current_workbase->network_diff * 100; - - fprintf(stdout, "\33[2K\r%s %c %sH/s %.1f SPS %d users %d workers %.0f shares %.1f%% diff", - stamp, ch, hashrate, stats->sps1, stats->users + stats->remote_users, - stats->workers + stats->remote_workers, sdiff, 
bdiff); - } else { - fprintf(stdout, "\33[2K\r%s %c %sH/s %.1f SPS %d users %d workers %.0f shares", - stamp, ch, hashrate, stats->sps1, stats->users + stats->remote_users, - stats->workers + stats->remote_workers, sdiff); - } - fflush(stdout); - } - - return NULL; -} - -static void *zmqnotify(void *arg) -{ -#ifdef HAVE_ZMQ_H - ckpool_t *ckp = arg; - sdata_t *sdata = ckp->sdata; - void *context, *notify; - int rc; - - rename_proc("zmqnotify"); - - context = zmq_ctx_new(); - notify = zmq_socket(context, ZMQ_SUB); - if (!notify) - quit(1, "zmq_socket failed with errno %d", errno); - rc = zmq_setsockopt(notify, ZMQ_SUBSCRIBE, "hashblock", 0); - if (rc < 0) - quit(1, "zmq_setsockopt failed with errno %d", errno); - rc = zmq_connect(notify, ckp->zmqblock); - if (rc < 0) - quit(1, "zmq_connect failed with errno %d", errno); - LOGNOTICE("ZMQ connected to %s", ckp->zmqblock); - - while (42) { - zmq_msg_t message; - - do { - char hexhash[68] = {}; - int size; - - zmq_msg_init(&message); - rc = zmq_msg_recv(&message, notify, 0); - if (unlikely(rc < 0)) { - LOGWARNING("zmq_msg_recv failed with error %d", errno); - sleep(5); - zmq_msg_close(&message); - continue; - } - - size = zmq_msg_size(&message); - switch (size) { - case 9: - LOGDEBUG("ZMQ hashblock message"); - break; - case 4: - LOGDEBUG("ZMQ sequence number"); - break; - case 32: - update_base(sdata, GEN_PRIORITY); - __bin2hex(hexhash, zmq_msg_data(&message), 32); - LOGNOTICE("ZMQ block hash %s", hexhash); - break; - default: - LOGWARNING("ZMQ message size error, size = %d!", size); - break; - } - zmq_msg_close(&message); - } while (zmq_msg_more(&message)); - - LOGDEBUG("ZMQ message complete"); - } - - zmq_close(notify); - zmq_ctx_destroy (context); -#endif - pthread_detach(pthread_self()); - - return NULL; -} - -void *stratifier(void *arg) -{ - pthread_t pth_blockupdate, pth_statsupdate, pth_throbber, pth_zmqnotify; - proc_instance_t *pi = (proc_instance_t *)arg; - int threads, tvsec_diff = 0; - ckpool_t *ckp = 
pi->ckp; - int64_t randomiser; - sdata_t *sdata; - - rename_proc(pi->processname); - LOGWARNING("%s stratifier starting", ckp->name); - sdata = ckzalloc(sizeof(sdata_t)); - ckp->sdata = sdata; - sdata->ckp = ckp; - sdata->verbose = true; - - /* Wait for the generator to have something for us */ - while (!ckp->proxy && !ckp->generator_ready) - cksleep_ms(10); - while (ckp->remote && !ckp->connector_ready) - cksleep_ms(10); - - if (!ckp->proxy) { - if (!generator_checkaddr(ckp, ckp->btcaddress, &ckp->script, &ckp->segwit)) { - LOGEMERG("Fatal: btcaddress invalid according to bitcoind"); - goto out; - } - - /* Store this for use elsewhere */ - hex2bin(scriptsig_header_bin, scriptsig_header, 41); - sdata->txnlen = address_to_txn(sdata->txnbin, ckp->btcaddress, ckp->script, ckp->segwit); - - /* Find a valid donation address if possible */ - if (generator_checkaddr(ckp, ckp->donaddress, &ckp->donscript, &ckp->donsegwit)) { - ckp->donvalid = true; - sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); - LOGNOTICE("BTC donation address valid %s", ckp->donaddress); - } else if (generator_checkaddr(ckp, ckp->tndonaddress, &ckp->donscript, &ckp->donsegwit)) { - ckp->donaddress = ckp->tndonaddress; - ckp->donvalid = true; - sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); - LOGNOTICE("BTC testnet donation address valid %s", ckp->donaddress); - } else if (generator_checkaddr(ckp, ckp->rtdonaddress, &ckp->donscript, &ckp->donsegwit)) { - ckp->donaddress = ckp->rtdonaddress; - ckp->donvalid = true; - sdata->dontxnlen = address_to_txn(sdata->dontxnbin, ckp->donaddress, ckp->donscript, ckp->donsegwit); - LOGNOTICE("BTC regtest donation address valid %s", ckp->donaddress); - } else - LOGNOTICE("No valid donation address found"); - } - - randomiser = time(NULL); - sdata->enonce1_64 = htole64(randomiser); - sdata->session_id = randomiser; - /* Set the initial id to time as high bits 
so as to not send the same - * id on restarts */ - randomiser <<= 32; - if (!ckp->proxy) - sdata->blockchange_id = sdata->workbase_id = randomiser; - - cklock_init(&sdata->instance_lock); - cksem_init(&sdata->update_sem); - cksem_post(&sdata->update_sem); - - /* Create half as many share processing and receiving threads as there - * are CPUs */ - threads = sysconf(_SC_NPROCESSORS_ONLN) / 2 ? : 1; - sdata->updateq = create_ckmsgq(ckp, "updater", &block_update); - sdata->sshareq = create_ckmsgqs(ckp, "sprocessor", &sshare_process, threads); - sdata->ssends = create_ckmsgqs(ckp, "ssender", &ssend_process, threads); - sdata->sauthq = create_ckmsgq(ckp, "authoriser", &sauth_process); - sdata->stxnq = create_ckmsgq(ckp, "stxnq", &send_transactions); - sdata->srecvs = create_ckmsgqs(ckp, "sreceiver", &srecv_process, threads); - create_pthread(&pth_throbber, throbber, ckp); - read_poolstats(ckp, &tvsec_diff); - read_userstats(ckp, sdata, tvsec_diff); - - /* Set diff impossibly large until we know the network diff */ - sdata->stats.network_diff = ~0ULL; - - cklock_init(&sdata->txn_lock); - cklock_init(&sdata->workbase_lock); - if (!ckp->proxy) - create_pthread(&pth_blockupdate, blockupdate, ckp); - else { - mutex_init(&sdata->proxy_lock); - } - - mutex_init(&sdata->stats_lock); - mutex_init(&sdata->uastats_lock); - if (!ckp->passthrough || ckp->node) - create_pthread(&pth_statsupdate, statsupdate, ckp); - - mutex_init(&sdata->share_lock); - if (!ckp->proxy) - create_pthread(&pth_zmqnotify, zmqnotify, ckp); - - ckp->stratifier_ready = true; - LOGWARNING("%s stratifier ready", ckp->name); - - stratum_loop(ckp, pi); -out: - /* We should never get here unless there's a fatal error */ - LOGEMERG("Stratifier failure, shutting down"); - exit(1); - return NULL; -} diff --git a/solo-ckpool-source/src/stratifier.h b/solo-ckpool-source/src/stratifier.h deleted file mode 100644 index 6a20d4f..0000000 --- a/solo-ckpool-source/src/stratifier.h +++ /dev/null @@ -1,102 +0,0 @@ -/* - * 
Copyright 2014-2017,2023 Con Kolivas - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the Free - * Software Foundation; either version 3 of the License, or (at your option) - * any later version. See COPYING for more details. - */ - -#ifndef STRATIFIER_H -#define STRATIFIER_H - -/* Generic structure for both workbase in stratifier and gbtbase in generator */ -struct genwork { - /* Hash table data */ - UT_hash_handle hh; - - /* The next two fields need to be consecutive as both of them are - * used as the key for their hashtable entry in remote_workbases */ - int64_t id; - /* The client id this workinfo came from if remote */ - int64_t client_id; - - char idstring[20]; - - /* How many readers we currently have of this workbase, set - * under write workbase_lock */ - int readcount; - - /* The id a remote workinfo is mapped to locally */ - int64_t mapped_id; - - ts_t gentime; - tv_t retired; - - /* GBT/shared variables */ - char target[68]; - double diff; - double network_diff; - uint32_t version; - uint32_t curtime; - char prevhash[68]; - char ntime[12]; - uint32_t ntime32; - char bbversion[12]; - char nbit[12]; - uint64_t coinbasevalue; - int height; - char *flags; - int txns; - char *txn_data; - char *txn_hashes; - char witnessdata[80]; //null-terminated ascii - bool insert_witness; - int merkles; - char merklehash[16][68]; - char merklebin[16][32]; - json_t *merkle_array; - - /* Template variables, lengths are binary lengths! 
*/ - char *coinb1; // coinbase1 - uchar *coinb1bin; - int coinb1len; // length of above - - char enonce1const[32]; // extranonce1 section that is constant - uchar enonce1constbin[16]; - int enonce1constlen; // length of above - usually zero unless proxying - int enonce1varlen; // length of unique extranonce1 string for each worker - usually 8 - - int enonce2varlen; // length of space left for extranonce2 - usually 8 unless proxying - - char *coinb2; // coinbase2 - uchar *coinb2bin; - int coinb2len; // length of above - char *coinb3bin; // coinbase3 for variable coinb2len - int coinb3len; // length of above - - /* Cached header binary */ - char headerbin[112]; - - char *logdir; - - ckpool_t *ckp; - bool proxy; /* This workbase is proxied work */ - - bool incomplete; /* This is a remote workinfo without all the txn data */ - - json_t *json; /* getblocktemplate json */ -}; - -void parse_remote_txns(ckpool_t *ckp, const json_t *val); -#define parse_upstream_txns(ckp, val) parse_remote_txns(ckp, val) -void parse_upstream_auth(ckpool_t *ckp, json_t *val); -void parse_upstream_workinfo(ckpool_t *ckp, json_t *val); -void parse_upstream_block(ckpool_t *ckp, json_t *val); -void parse_upstream_reqtxns(ckpool_t *ckp, json_t *val); -char *stratifier_stats(ckpool_t *ckp, void *data); -void _stratifier_add_recv(ckpool_t *ckp, json_t *val, const char *file, const char *func, const int line); -#define stratifier_add_recv(ckp, val) _stratifier_add_recv(ckp, val, __FILE__, __func__, __LINE__) -void *stratifier(void *arg); - -#endif /* STRATIFIER_H */ diff --git a/solo-ckpool-source/src/uthash.h b/solo-ckpool-source/src/uthash.h deleted file mode 100644 index 44de601..0000000 --- a/solo-ckpool-source/src/uthash.h +++ /dev/null @@ -1,1144 +0,0 @@ -/* -Copyright (c) 2003-2022, Troy D. Hanson https://troydhanson.github.io/uthash/ -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifndef UTHASH_H -#define UTHASH_H - -#define UTHASH_VERSION 2.3.0 - -#include /* memcmp, memset, strlen */ -#include /* ptrdiff_t */ -#include /* exit */ - -#if defined(HASH_DEFINE_OWN_STDINT) && HASH_DEFINE_OWN_STDINT -/* This codepath is provided for backward compatibility, but I plan to remove it. */ -#warning "HASH_DEFINE_OWN_STDINT is deprecated; please use HASH_NO_STDINT instead" -typedef unsigned int uint32_t; -typedef unsigned char uint8_t; -#elif defined(HASH_NO_STDINT) && HASH_NO_STDINT -#else -#include /* uint8_t, uint32_t */ -#endif - -/* These macros use decltype or the earlier __typeof GNU extension. - As decltype is only available in newer compilers (VS2010 or gcc 4.3+ - when compiling c++ source) this code uses whatever method is needed - or, for VS2008 where neither is available, uses casting workarounds. 
*/ -#if !defined(DECLTYPE) && !defined(NO_DECLTYPE) -#if defined(_MSC_VER) /* MS compiler */ -#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ -#define DECLTYPE(x) (decltype(x)) -#else /* VS2008 or older (or VS2010 in C mode) */ -#define NO_DECLTYPE -#endif -#elif defined(__MCST__) /* Elbrus C Compiler */ -#define DECLTYPE(x) (__typeof(x)) -#elif defined(__BORLANDC__) || defined(__ICCARM__) || defined(__LCC__) || defined(__WATCOMC__) -#define NO_DECLTYPE -#else /* GNU, Sun and other compilers */ -#define DECLTYPE(x) (__typeof(x)) -#endif -#endif - -#ifdef NO_DECLTYPE -#define DECLTYPE(x) -#define DECLTYPE_ASSIGN(dst,src) \ -do { \ - char **_da_dst = (char**)(&(dst)); \ - *_da_dst = (char*)(src); \ -} while (0) -#else -#define DECLTYPE_ASSIGN(dst,src) \ -do { \ - (dst) = DECLTYPE(dst)(src); \ -} while (0) -#endif - -#ifndef uthash_malloc -#define uthash_malloc(sz) malloc(sz) /* malloc fcn */ -#endif -#ifndef uthash_free -#define uthash_free(ptr,sz) free(ptr) /* free fcn */ -#endif -#ifndef uthash_bzero -#define uthash_bzero(a,n) memset(a,'\0',n) -#endif -#ifndef uthash_strlen -#define uthash_strlen(s) strlen(s) -#endif - -#ifndef HASH_FUNCTION -#define HASH_FUNCTION(keyptr,keylen,hashv) HASH_JEN(keyptr, keylen, hashv) -#endif - -#ifndef HASH_KEYCMP -#define HASH_KEYCMP(a,b,n) memcmp(a,b,n) -#endif - -#ifndef uthash_noexpand_fyi -#define uthash_noexpand_fyi(tbl) /* can be defined to log noexpand */ -#endif -#ifndef uthash_expand_fyi -#define uthash_expand_fyi(tbl) /* can be defined to log expands */ -#endif - -#ifndef HASH_NONFATAL_OOM -#define HASH_NONFATAL_OOM 0 -#endif - -#if HASH_NONFATAL_OOM -/* malloc failures can be recovered from */ - -#ifndef uthash_nonfatal_oom -#define uthash_nonfatal_oom(obj) do {} while (0) /* non-fatal OOM error */ -#endif - -#define HASH_RECORD_OOM(oomed) do { (oomed) = 1; } while (0) -#define IF_HASH_NONFATAL_OOM(x) x - -#else -/* malloc failures result in lost memory, hash tables are unusable */ - 
-#ifndef uthash_fatal -#define uthash_fatal(msg) exit(-1) /* fatal OOM error */ -#endif - -#define HASH_RECORD_OOM(oomed) uthash_fatal("out of memory") -#define IF_HASH_NONFATAL_OOM(x) - -#endif - -/* initial number of buckets */ -#define HASH_INITIAL_NUM_BUCKETS 32U /* initial number of buckets */ -#define HASH_INITIAL_NUM_BUCKETS_LOG2 5U /* lg2 of initial number of buckets */ -#define HASH_BKT_CAPACITY_THRESH 10U /* expand when bucket count reaches */ - -/* calculate the element whose hash handle address is hhp */ -#define ELMT_FROM_HH(tbl,hhp) ((void*)(((char*)(hhp)) - ((tbl)->hho))) -/* calculate the hash handle from element address elp */ -#define HH_FROM_ELMT(tbl,elp) ((UT_hash_handle*)(void*)(((char*)(elp)) + ((tbl)->hho))) - -#define HASH_ROLLBACK_BKT(hh, head, itemptrhh) \ -do { \ - struct UT_hash_handle *_hd_hh_item = (itemptrhh); \ - unsigned _hd_bkt; \ - HASH_TO_BKT(_hd_hh_item->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ - (head)->hh.tbl->buckets[_hd_bkt].count++; \ - _hd_hh_item->hh_next = NULL; \ - _hd_hh_item->hh_prev = NULL; \ -} while (0) - -#define HASH_VALUE(keyptr,keylen,hashv) \ -do { \ - HASH_FUNCTION(keyptr, keylen, hashv); \ -} while (0) - -#define HASH_FIND_BYHASHVALUE(hh,head,keyptr,keylen,hashval,out) \ -do { \ - (out) = NULL; \ - if (head) { \ - unsigned _hf_bkt; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _hf_bkt); \ - if (HASH_BLOOM_TEST((head)->hh.tbl, hashval) != 0) { \ - HASH_FIND_IN_BKT((head)->hh.tbl, hh, (head)->hh.tbl->buckets[ _hf_bkt ], keyptr, keylen, hashval, out); \ - } \ - } \ -} while (0) - -#define HASH_FIND(hh,head,keyptr,keylen,out) \ -do { \ - (out) = NULL; \ - if (head) { \ - unsigned _hf_hashv; \ - HASH_VALUE(keyptr, keylen, _hf_hashv); \ - HASH_FIND_BYHASHVALUE(hh, head, keyptr, keylen, _hf_hashv, out); \ - } \ -} while (0) - -#ifdef HASH_BLOOM -#define HASH_BLOOM_BITLEN (1UL << HASH_BLOOM) -#define HASH_BLOOM_BYTELEN (HASH_BLOOM_BITLEN/8UL) + (((HASH_BLOOM_BITLEN%8UL)!=0UL) ? 
1UL : 0UL) -#define HASH_BLOOM_MAKE(tbl,oomed) \ -do { \ - (tbl)->bloom_nbits = HASH_BLOOM; \ - (tbl)->bloom_bv = (uint8_t*)uthash_malloc(HASH_BLOOM_BYTELEN); \ - if (!(tbl)->bloom_bv) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ - (tbl)->bloom_sig = HASH_BLOOM_SIGNATURE; \ - } \ -} while (0) - -#define HASH_BLOOM_FREE(tbl) \ -do { \ - uthash_free((tbl)->bloom_bv, HASH_BLOOM_BYTELEN); \ -} while (0) - -#define HASH_BLOOM_BITSET(bv,idx) (bv[(idx)/8U] |= (1U << ((idx)%8U))) -#define HASH_BLOOM_BITTEST(bv,idx) (bv[(idx)/8U] & (1U << ((idx)%8U))) - -#define HASH_BLOOM_ADD(tbl,hashv) \ - HASH_BLOOM_BITSET((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) - -#define HASH_BLOOM_TEST(tbl,hashv) \ - HASH_BLOOM_BITTEST((tbl)->bloom_bv, ((hashv) & (uint32_t)((1UL << (tbl)->bloom_nbits) - 1U))) - -#else -#define HASH_BLOOM_MAKE(tbl,oomed) -#define HASH_BLOOM_FREE(tbl) -#define HASH_BLOOM_ADD(tbl,hashv) -#define HASH_BLOOM_TEST(tbl,hashv) (1) -#define HASH_BLOOM_BYTELEN 0U -#endif - -#define HASH_MAKE_TABLE(hh,head,oomed) \ -do { \ - (head)->hh.tbl = (UT_hash_table*)uthash_malloc(sizeof(UT_hash_table)); \ - if (!(head)->hh.tbl) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head)->hh.tbl->tail = &((head)->hh); \ - (head)->hh.tbl->num_buckets = HASH_INITIAL_NUM_BUCKETS; \ - (head)->hh.tbl->log2_num_buckets = HASH_INITIAL_NUM_BUCKETS_LOG2; \ - (head)->hh.tbl->hho = (char*)(&(head)->hh) - (char*)(head); \ - (head)->hh.tbl->buckets = (UT_hash_bucket*)uthash_malloc( \ - HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ - (head)->hh.tbl->signature = HASH_SIGNATURE; \ - if (!(head)->hh.tbl->buckets) { \ - HASH_RECORD_OOM(oomed); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - } else { \ - uthash_bzero((head)->hh.tbl->buckets, \ - HASH_INITIAL_NUM_BUCKETS * sizeof(struct UT_hash_bucket)); \ - HASH_BLOOM_MAKE((head)->hh.tbl, 
oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (oomed) { \ - uthash_free((head)->hh.tbl->buckets, \ - HASH_INITIAL_NUM_BUCKETS*sizeof(struct UT_hash_bucket)); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - } \ - ) \ - } \ - } \ -} while (0) - -#define HASH_REPLACE_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,replaced,cmpfcn) \ -do { \ - (replaced) = NULL; \ - HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ - if (replaced) { \ - HASH_DELETE(hh, head, replaced); \ - } \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn); \ -} while (0) - -#define HASH_REPLACE_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add,replaced) \ -do { \ - (replaced) = NULL; \ - HASH_FIND_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, replaced); \ - if (replaced) { \ - HASH_DELETE(hh, head, replaced); \ - } \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add); \ -} while (0) - -#define HASH_REPLACE(hh,head,fieldname,keylen_in,add,replaced) \ -do { \ - unsigned _hr_hashv; \ - HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ - HASH_REPLACE_BYHASHVALUE(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced); \ -} while (0) - -#define HASH_REPLACE_INORDER(hh,head,fieldname,keylen_in,add,replaced,cmpfcn) \ -do { \ - unsigned _hr_hashv; \ - HASH_VALUE(&((add)->fieldname), keylen_in, _hr_hashv); \ - HASH_REPLACE_BYHASHVALUE_INORDER(hh, head, fieldname, keylen_in, _hr_hashv, add, replaced, cmpfcn); \ -} while (0) - -#define HASH_APPEND_LIST(hh, head, add) \ -do { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = ELMT_FROM_HH((head)->hh.tbl, (head)->hh.tbl->tail); \ - (head)->hh.tbl->tail->next = (add); \ - (head)->hh.tbl->tail = &((add)->hh); \ -} while (0) - -#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ -do { \ - do { \ - if (cmpfcn(DECLTYPE(head)(_hs_iter), add) > 0) { \ - break; \ - } \ - } while ((_hs_iter = 
HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ -} while (0) - -#ifdef NO_DECLTYPE -#undef HASH_AKBI_INNER_LOOP -#define HASH_AKBI_INNER_LOOP(hh,head,add,cmpfcn) \ -do { \ - char *_hs_saved_head = (char*)(head); \ - do { \ - DECLTYPE_ASSIGN(head, _hs_iter); \ - if (cmpfcn(head, add) > 0) { \ - DECLTYPE_ASSIGN(head, _hs_saved_head); \ - break; \ - } \ - DECLTYPE_ASSIGN(head, _hs_saved_head); \ - } while ((_hs_iter = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->next)); \ -} while (0) -#endif - -#if HASH_NONFATAL_OOM - -#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ -do { \ - if (!(oomed)) { \ - unsigned _ha_bkt; \ - (head)->hh.tbl->num_items++; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ - HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ - if (oomed) { \ - HASH_ROLLBACK_BKT(hh, head, &(add)->hh); \ - HASH_DELETE_HH(hh, head, &(add)->hh); \ - (add)->hh.tbl = NULL; \ - uthash_nonfatal_oom(add); \ - } else { \ - HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ - HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ - } \ - } else { \ - (add)->hh.tbl = NULL; \ - uthash_nonfatal_oom(add); \ - } \ -} while (0) - -#else - -#define HASH_ADD_TO_TABLE(hh,head,keyptr,keylen_in,hashval,add,oomed) \ -do { \ - unsigned _ha_bkt; \ - (head)->hh.tbl->num_items++; \ - HASH_TO_BKT(hashval, (head)->hh.tbl->num_buckets, _ha_bkt); \ - HASH_ADD_TO_BKT((head)->hh.tbl->buckets[_ha_bkt], hh, &(add)->hh, oomed); \ - HASH_BLOOM_ADD((head)->hh.tbl, hashval); \ - HASH_EMIT_KEY(hh, head, keyptr, keylen_in); \ -} while (0) - -#endif - - -#define HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh,head,keyptr,keylen_in,hashval,add,cmpfcn) \ -do { \ - IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ - (add)->hh.hashv = (hashval); \ - (add)->hh.key = (char*) (keyptr); \ - (add)->hh.keylen = (unsigned) (keylen_in); \ - if (!(head)) { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = NULL; \ - HASH_MAKE_TABLE(hh, add, _ha_oomed); \ - IF_HASH_NONFATAL_OOM( if 
(!_ha_oomed) { ) \ - (head) = (add); \ - IF_HASH_NONFATAL_OOM( } ) \ - } else { \ - void *_hs_iter = (head); \ - (add)->hh.tbl = (head)->hh.tbl; \ - HASH_AKBI_INNER_LOOP(hh, head, add, cmpfcn); \ - if (_hs_iter) { \ - (add)->hh.next = _hs_iter; \ - if (((add)->hh.prev = HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev)) { \ - HH_FROM_ELMT((head)->hh.tbl, (add)->hh.prev)->next = (add); \ - } else { \ - (head) = (add); \ - } \ - HH_FROM_ELMT((head)->hh.tbl, _hs_iter)->prev = (add); \ - } else { \ - HASH_APPEND_LIST(hh, head, add); \ - } \ - } \ - HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ - HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE_INORDER"); \ -} while (0) - -#define HASH_ADD_KEYPTR_INORDER(hh,head,keyptr,keylen_in,add,cmpfcn) \ -do { \ - unsigned _hs_hashv; \ - HASH_VALUE(keyptr, keylen_in, _hs_hashv); \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, keyptr, keylen_in, _hs_hashv, add, cmpfcn); \ -} while (0) - -#define HASH_ADD_BYHASHVALUE_INORDER(hh,head,fieldname,keylen_in,hashval,add,cmpfcn) \ - HASH_ADD_KEYPTR_BYHASHVALUE_INORDER(hh, head, &((add)->fieldname), keylen_in, hashval, add, cmpfcn) - -#define HASH_ADD_INORDER(hh,head,fieldname,keylen_in,add,cmpfcn) \ - HASH_ADD_KEYPTR_INORDER(hh, head, &((add)->fieldname), keylen_in, add, cmpfcn) - -#define HASH_ADD_KEYPTR_BYHASHVALUE(hh,head,keyptr,keylen_in,hashval,add) \ -do { \ - IF_HASH_NONFATAL_OOM( int _ha_oomed = 0; ) \ - (add)->hh.hashv = (hashval); \ - (add)->hh.key = (const void*) (keyptr); \ - (add)->hh.keylen = (unsigned) (keylen_in); \ - if (!(head)) { \ - (add)->hh.next = NULL; \ - (add)->hh.prev = NULL; \ - HASH_MAKE_TABLE(hh, add, _ha_oomed); \ - IF_HASH_NONFATAL_OOM( if (!_ha_oomed) { ) \ - (head) = (add); \ - IF_HASH_NONFATAL_OOM( } ) \ - } else { \ - (add)->hh.tbl = (head)->hh.tbl; \ - HASH_APPEND_LIST(hh, head, add); \ - } \ - HASH_ADD_TO_TABLE(hh, head, keyptr, keylen_in, hashval, add, _ha_oomed); \ - HASH_FSCK(hh, head, "HASH_ADD_KEYPTR_BYHASHVALUE"); \ -} 
while (0) - -#define HASH_ADD_KEYPTR(hh,head,keyptr,keylen_in,add) \ -do { \ - unsigned _ha_hashv; \ - HASH_VALUE(keyptr, keylen_in, _ha_hashv); \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, keyptr, keylen_in, _ha_hashv, add); \ -} while (0) - -#define HASH_ADD_BYHASHVALUE(hh,head,fieldname,keylen_in,hashval,add) \ - HASH_ADD_KEYPTR_BYHASHVALUE(hh, head, &((add)->fieldname), keylen_in, hashval, add) - -#define HASH_ADD(hh,head,fieldname,keylen_in,add) \ - HASH_ADD_KEYPTR(hh, head, &((add)->fieldname), keylen_in, add) - -#define HASH_TO_BKT(hashv,num_bkts,bkt) \ -do { \ - bkt = ((hashv) & ((num_bkts) - 1U)); \ -} while (0) - -/* delete "delptr" from the hash table. - * "the usual" patch-up process for the app-order doubly-linked-list. - * The use of _hd_hh_del below deserves special explanation. - * These used to be expressed using (delptr) but that led to a bug - * if someone used the same symbol for the head and deletee, like - * HASH_DELETE(hh,users,users); - * We want that to work, but by changing the head (users) below - * we were forfeiting our ability to further refer to the deletee (users) - * in the patch-up process. Solution: use scratch space to - * copy the deletee pointer, then the latter references are via that - * scratch pointer rather than through the repointed (users) symbol. 
- */ -#define HASH_DELETE(hh,head,delptr) \ - HASH_DELETE_HH(hh, head, &(delptr)->hh) - -#define HASH_DELETE_HH(hh,head,delptrhh) \ -do { \ - const struct UT_hash_handle *_hd_hh_del = (delptrhh); \ - if ((_hd_hh_del->prev == NULL) && (_hd_hh_del->next == NULL)) { \ - HASH_BLOOM_FREE((head)->hh.tbl); \ - uthash_free((head)->hh.tbl->buckets, \ - (head)->hh.tbl->num_buckets * sizeof(struct UT_hash_bucket)); \ - uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head) = NULL; \ - } else { \ - unsigned _hd_bkt; \ - if (_hd_hh_del == (head)->hh.tbl->tail) { \ - (head)->hh.tbl->tail = HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev); \ - } \ - if (_hd_hh_del->prev != NULL) { \ - HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->prev)->next = _hd_hh_del->next; \ - } else { \ - DECLTYPE_ASSIGN(head, _hd_hh_del->next); \ - } \ - if (_hd_hh_del->next != NULL) { \ - HH_FROM_ELMT((head)->hh.tbl, _hd_hh_del->next)->prev = _hd_hh_del->prev; \ - } \ - HASH_TO_BKT(_hd_hh_del->hashv, (head)->hh.tbl->num_buckets, _hd_bkt); \ - HASH_DEL_IN_BKT((head)->hh.tbl->buckets[_hd_bkt], _hd_hh_del); \ - (head)->hh.tbl->num_items--; \ - } \ - HASH_FSCK(hh, head, "HASH_DELETE_HH"); \ -} while (0) - -/* convenience forms of HASH_FIND/HASH_ADD/HASH_DEL */ -#define HASH_FIND_STR(head,findstr,out) \ -do { \ - unsigned _uthash_hfstr_keylen = (unsigned)uthash_strlen(findstr); \ - HASH_FIND(hh, head, findstr, _uthash_hfstr_keylen, out); \ -} while (0) -#define HASH_ADD_STR(head,strfield,add) \ -do { \ - unsigned _uthash_hastr_keylen = (unsigned)uthash_strlen((add)->strfield); \ - HASH_ADD(hh, head, strfield[0], _uthash_hastr_keylen, add); \ -} while (0) -#define HASH_REPLACE_STR(head,strfield,add,replaced) \ -do { \ - unsigned _uthash_hrstr_keylen = (unsigned)uthash_strlen((add)->strfield); \ - HASH_REPLACE(hh, head, strfield[0], _uthash_hrstr_keylen, add, replaced); \ -} while (0) -#define HASH_FIND_INT(head,findint,out) \ - HASH_FIND(hh,head,findint,sizeof(int),out) -#define HASH_ADD_INT(head,intfield,add) 
\ - HASH_ADD(hh,head,intfield,sizeof(int),add) -#define HASH_REPLACE_INT(head,intfield,add,replaced) \ - HASH_REPLACE(hh,head,intfield,sizeof(int),add,replaced) -#define HASH_FIND_I64(head,findint,out) \ - HASH_FIND(hh,head,findint,sizeof(int64_t),out) -#define HASH_ADD_I64(head,intfield,add) \ - HASH_ADD(hh,head,intfield,sizeof(int64_t),add) -#define HASH_FIND_PTR(head,findptr,out) \ - HASH_FIND(hh,head,findptr,sizeof(void *),out) -#define HASH_ADD_PTR(head,ptrfield,add) \ - HASH_ADD(hh,head,ptrfield,sizeof(void *),add) -#define HASH_REPLACE_PTR(head,ptrfield,add,replaced) \ - HASH_REPLACE(hh,head,ptrfield,sizeof(void *),add,replaced) -#define HASH_DEL(head,delptr) \ - HASH_DELETE(hh,head,delptr) - -/* HASH_FSCK checks hash integrity on every add/delete when HASH_DEBUG is defined. - * This is for uthash developer only; it compiles away if HASH_DEBUG isn't defined. - */ -#ifdef HASH_DEBUG -#include /* fprintf, stderr */ -#define HASH_OOPS(...) do { fprintf(stderr, __VA_ARGS__); exit(-1); } while (0) -#define HASH_FSCK(hh,head,where) \ -do { \ - struct UT_hash_handle *_thh; \ - if (head) { \ - unsigned _bkt_i; \ - unsigned _count = 0; \ - char *_prev; \ - for (_bkt_i = 0; _bkt_i < (head)->hh.tbl->num_buckets; ++_bkt_i) { \ - unsigned _bkt_count = 0; \ - _thh = (head)->hh.tbl->buckets[_bkt_i].hh_head; \ - _prev = NULL; \ - while (_thh) { \ - if (_prev != (char*)(_thh->hh_prev)) { \ - HASH_OOPS("%s: invalid hh_prev %p, actual %p\n", \ - (where), (void*)_thh->hh_prev, (void*)_prev); \ - } \ - _bkt_count++; \ - _prev = (char*)(_thh); \ - _thh = _thh->hh_next; \ - } \ - _count += _bkt_count; \ - if ((head)->hh.tbl->buckets[_bkt_i].count != _bkt_count) { \ - HASH_OOPS("%s: invalid bucket count %u, actual %u\n", \ - (where), (head)->hh.tbl->buckets[_bkt_i].count, _bkt_count); \ - } \ - } \ - if (_count != (head)->hh.tbl->num_items) { \ - HASH_OOPS("%s: invalid hh item count %u, actual %u\n", \ - (where), (head)->hh.tbl->num_items, _count); \ - } \ - _count = 0; \ - _prev = 
NULL; \ - _thh = &(head)->hh; \ - while (_thh) { \ - _count++; \ - if (_prev != (char*)_thh->prev) { \ - HASH_OOPS("%s: invalid prev %p, actual %p\n", \ - (where), (void*)_thh->prev, (void*)_prev); \ - } \ - _prev = (char*)ELMT_FROM_HH((head)->hh.tbl, _thh); \ - _thh = (_thh->next ? HH_FROM_ELMT((head)->hh.tbl, _thh->next) : NULL); \ - } \ - if (_count != (head)->hh.tbl->num_items) { \ - HASH_OOPS("%s: invalid app item count %u, actual %u\n", \ - (where), (head)->hh.tbl->num_items, _count); \ - } \ - } \ -} while (0) -#else -#define HASH_FSCK(hh,head,where) -#endif - -/* When compiled with -DHASH_EMIT_KEYS, length-prefixed keys are emitted to - * the descriptor to which this macro is defined for tuning the hash function. - * The app can #include to get the prototype for write(2). */ -#ifdef HASH_EMIT_KEYS -#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) \ -do { \ - unsigned _klen = fieldlen; \ - write(HASH_EMIT_KEYS, &_klen, sizeof(_klen)); \ - write(HASH_EMIT_KEYS, keyptr, (unsigned long)fieldlen); \ -} while (0) -#else -#define HASH_EMIT_KEY(hh,head,keyptr,fieldlen) -#endif - -/* The Bernstein hash function, used in Perl prior to v5.6. Note (x<<5+x)=x*33. 
*/ -#define HASH_BER(key,keylen,hashv) \ -do { \ - unsigned _hb_keylen = (unsigned)keylen; \ - const unsigned char *_hb_key = (const unsigned char*)(key); \ - (hashv) = 0; \ - while (_hb_keylen-- != 0U) { \ - (hashv) = (((hashv) << 5) + (hashv)) + *_hb_key++; \ - } \ -} while (0) - - -/* SAX/FNV/OAT/JEN hash functions are macro variants of those listed at - * http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx - * (archive link: https://archive.is/Ivcan ) - */ -#define HASH_SAX(key,keylen,hashv) \ -do { \ - unsigned _sx_i; \ - const unsigned char *_hs_key = (const unsigned char*)(key); \ - hashv = 0; \ - for (_sx_i=0; _sx_i < keylen; _sx_i++) { \ - hashv ^= (hashv << 5) + (hashv >> 2) + _hs_key[_sx_i]; \ - } \ -} while (0) -/* FNV-1a variation */ -#define HASH_FNV(key,keylen,hashv) \ -do { \ - unsigned _fn_i; \ - const unsigned char *_hf_key = (const unsigned char*)(key); \ - (hashv) = 2166136261U; \ - for (_fn_i=0; _fn_i < keylen; _fn_i++) { \ - hashv = hashv ^ _hf_key[_fn_i]; \ - hashv = hashv * 16777619U; \ - } \ -} while (0) - -#define HASH_OAT(key,keylen,hashv) \ -do { \ - unsigned _ho_i; \ - const unsigned char *_ho_key=(const unsigned char*)(key); \ - hashv = 0; \ - for(_ho_i=0; _ho_i < keylen; _ho_i++) { \ - hashv += _ho_key[_ho_i]; \ - hashv += (hashv << 10); \ - hashv ^= (hashv >> 6); \ - } \ - hashv += (hashv << 3); \ - hashv ^= (hashv >> 11); \ - hashv += (hashv << 15); \ -} while (0) - -#define HASH_JEN_MIX(a,b,c) \ -do { \ - a -= b; a -= c; a ^= ( c >> 13 ); \ - b -= c; b -= a; b ^= ( a << 8 ); \ - c -= a; c -= b; c ^= ( b >> 13 ); \ - a -= b; a -= c; a ^= ( c >> 12 ); \ - b -= c; b -= a; b ^= ( a << 16 ); \ - c -= a; c -= b; c ^= ( b >> 5 ); \ - a -= b; a -= c; a ^= ( c >> 3 ); \ - b -= c; b -= a; b ^= ( a << 10 ); \ - c -= a; c -= b; c ^= ( b >> 15 ); \ -} while (0) - -#define HASH_JEN(key,keylen,hashv) \ -do { \ - unsigned _hj_i,_hj_j,_hj_k; \ - unsigned const char *_hj_key=(unsigned const char*)(key); \ - hashv = 0xfeedbeefu; \ - 
_hj_i = _hj_j = 0x9e3779b9u; \ - _hj_k = (unsigned)(keylen); \ - while (_hj_k >= 12U) { \ - _hj_i += (_hj_key[0] + ( (unsigned)_hj_key[1] << 8 ) \ - + ( (unsigned)_hj_key[2] << 16 ) \ - + ( (unsigned)_hj_key[3] << 24 ) ); \ - _hj_j += (_hj_key[4] + ( (unsigned)_hj_key[5] << 8 ) \ - + ( (unsigned)_hj_key[6] << 16 ) \ - + ( (unsigned)_hj_key[7] << 24 ) ); \ - hashv += (_hj_key[8] + ( (unsigned)_hj_key[9] << 8 ) \ - + ( (unsigned)_hj_key[10] << 16 ) \ - + ( (unsigned)_hj_key[11] << 24 ) ); \ - \ - HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ - \ - _hj_key += 12; \ - _hj_k -= 12U; \ - } \ - hashv += (unsigned)(keylen); \ - switch ( _hj_k ) { \ - case 11: hashv += ( (unsigned)_hj_key[10] << 24 ); /* FALLTHROUGH */ \ - case 10: hashv += ( (unsigned)_hj_key[9] << 16 ); /* FALLTHROUGH */ \ - case 9: hashv += ( (unsigned)_hj_key[8] << 8 ); /* FALLTHROUGH */ \ - case 8: _hj_j += ( (unsigned)_hj_key[7] << 24 ); /* FALLTHROUGH */ \ - case 7: _hj_j += ( (unsigned)_hj_key[6] << 16 ); /* FALLTHROUGH */ \ - case 6: _hj_j += ( (unsigned)_hj_key[5] << 8 ); /* FALLTHROUGH */ \ - case 5: _hj_j += _hj_key[4]; /* FALLTHROUGH */ \ - case 4: _hj_i += ( (unsigned)_hj_key[3] << 24 ); /* FALLTHROUGH */ \ - case 3: _hj_i += ( (unsigned)_hj_key[2] << 16 ); /* FALLTHROUGH */ \ - case 2: _hj_i += ( (unsigned)_hj_key[1] << 8 ); /* FALLTHROUGH */ \ - case 1: _hj_i += _hj_key[0]; /* FALLTHROUGH */ \ - default: ; \ - } \ - HASH_JEN_MIX(_hj_i, _hj_j, hashv); \ -} while (0) - -/* The Paul Hsieh hash function */ -#undef get16bits -#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \ - || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__) -#define get16bits(d) (*((const uint16_t *) (d))) -#endif - -#if !defined (get16bits) -#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8) \ - +(uint32_t)(((const uint8_t *)(d))[0]) ) -#endif -#define HASH_SFH(key,keylen,hashv) \ -do { \ - unsigned const char *_sfh_key=(unsigned const char*)(key); \ - uint32_t _sfh_tmp, 
_sfh_len = (uint32_t)keylen; \ - \ - unsigned _sfh_rem = _sfh_len & 3U; \ - _sfh_len >>= 2; \ - hashv = 0xcafebabeu; \ - \ - /* Main loop */ \ - for (;_sfh_len > 0U; _sfh_len--) { \ - hashv += get16bits (_sfh_key); \ - _sfh_tmp = ((uint32_t)(get16bits (_sfh_key+2)) << 11) ^ hashv; \ - hashv = (hashv << 16) ^ _sfh_tmp; \ - _sfh_key += 2U*sizeof (uint16_t); \ - hashv += hashv >> 11; \ - } \ - \ - /* Handle end cases */ \ - switch (_sfh_rem) { \ - case 3: hashv += get16bits (_sfh_key); \ - hashv ^= hashv << 16; \ - hashv ^= (uint32_t)(_sfh_key[sizeof (uint16_t)]) << 18; \ - hashv += hashv >> 11; \ - break; \ - case 2: hashv += get16bits (_sfh_key); \ - hashv ^= hashv << 11; \ - hashv += hashv >> 17; \ - break; \ - case 1: hashv += *_sfh_key; \ - hashv ^= hashv << 10; \ - hashv += hashv >> 1; \ - break; \ - default: ; \ - } \ - \ - /* Force "avalanching" of final 127 bits */ \ - hashv ^= hashv << 3; \ - hashv += hashv >> 5; \ - hashv ^= hashv << 4; \ - hashv += hashv >> 17; \ - hashv ^= hashv << 25; \ - hashv += hashv >> 6; \ -} while (0) - -/* iterate over items in a known bucket to find desired item */ -#define HASH_FIND_IN_BKT(tbl,hh,head,keyptr,keylen_in,hashval,out) \ -do { \ - if ((head).hh_head != NULL) { \ - DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (head).hh_head)); \ - } else { \ - (out) = NULL; \ - } \ - while ((out) != NULL) { \ - if ((out)->hh.hashv == (hashval) && (out)->hh.keylen == (keylen_in)) { \ - if (HASH_KEYCMP((out)->hh.key, keyptr, keylen_in) == 0) { \ - break; \ - } \ - } \ - if ((out)->hh.hh_next != NULL) { \ - DECLTYPE_ASSIGN(out, ELMT_FROM_HH(tbl, (out)->hh.hh_next)); \ - } else { \ - (out) = NULL; \ - } \ - } \ -} while (0) - -/* add an item to a bucket */ -#define HASH_ADD_TO_BKT(head,hh,addhh,oomed) \ -do { \ - UT_hash_bucket *_ha_head = &(head); \ - _ha_head->count++; \ - (addhh)->hh_next = _ha_head->hh_head; \ - (addhh)->hh_prev = NULL; \ - if (_ha_head->hh_head != NULL) { \ - _ha_head->hh_head->hh_prev = (addhh); \ - } \ - 
_ha_head->hh_head = (addhh); \ - if ((_ha_head->count >= ((_ha_head->expand_mult + 1U) * HASH_BKT_CAPACITY_THRESH)) \ - && !(addhh)->tbl->noexpand) { \ - HASH_EXPAND_BUCKETS(addhh,(addhh)->tbl, oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (oomed) { \ - HASH_DEL_IN_BKT(head,addhh); \ - } \ - ) \ - } \ -} while (0) - -/* remove an item from a given bucket */ -#define HASH_DEL_IN_BKT(head,delhh) \ -do { \ - UT_hash_bucket *_hd_head = &(head); \ - _hd_head->count--; \ - if (_hd_head->hh_head == (delhh)) { \ - _hd_head->hh_head = (delhh)->hh_next; \ - } \ - if ((delhh)->hh_prev) { \ - (delhh)->hh_prev->hh_next = (delhh)->hh_next; \ - } \ - if ((delhh)->hh_next) { \ - (delhh)->hh_next->hh_prev = (delhh)->hh_prev; \ - } \ -} while (0) - -/* Bucket expansion has the effect of doubling the number of buckets - * and redistributing the items into the new buckets. Ideally the - * items will distribute more or less evenly into the new buckets - * (the extent to which this is true is a measure of the quality of - * the hash function as it applies to the key domain). - * - * With the items distributed into more buckets, the chain length - * (item count) in each bucket is reduced. Thus by expanding buckets - * the hash keeps a bound on the chain length. This bounded chain - * length is the essence of how a hash provides constant time lookup. - * - * The calculation of tbl->ideal_chain_maxlen below deserves some - * explanation. First, keep in mind that we're calculating the ideal - * maximum chain length based on the *new* (doubled) bucket count. - * In fractions this is just n/b (n=number of items,b=new num buckets). - * Since the ideal chain length is an integer, we want to calculate - * ceil(n/b). We don't depend on floating point arithmetic in this - * hash, so to calculate ceil(n/b) with integers we could write - * - * ceil(n/b) = (n/b) + ((n%b)?1:0) - * - * and in fact a previous version of this hash did just that. 
- * But now we have improved things a bit by recognizing that b is - * always a power of two. We keep its base 2 log handy (call it lb), - * so now we can write this with a bit shift and logical AND: - * - * ceil(n/b) = (n>>lb) + ( (n & (b-1)) ? 1:0) - * - */ -#define HASH_EXPAND_BUCKETS(hh,tbl,oomed) \ -do { \ - unsigned _he_bkt; \ - unsigned _he_bkt_i; \ - struct UT_hash_handle *_he_thh, *_he_hh_nxt; \ - UT_hash_bucket *_he_new_buckets, *_he_newbkt; \ - _he_new_buckets = (UT_hash_bucket*)uthash_malloc( \ - sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ - if (!_he_new_buckets) { \ - HASH_RECORD_OOM(oomed); \ - } else { \ - uthash_bzero(_he_new_buckets, \ - sizeof(struct UT_hash_bucket) * (tbl)->num_buckets * 2U); \ - (tbl)->ideal_chain_maxlen = \ - ((tbl)->num_items >> ((tbl)->log2_num_buckets+1U)) + \ - ((((tbl)->num_items & (((tbl)->num_buckets*2U)-1U)) != 0U) ? 1U : 0U); \ - (tbl)->nonideal_items = 0; \ - for (_he_bkt_i = 0; _he_bkt_i < (tbl)->num_buckets; _he_bkt_i++) { \ - _he_thh = (tbl)->buckets[ _he_bkt_i ].hh_head; \ - while (_he_thh != NULL) { \ - _he_hh_nxt = _he_thh->hh_next; \ - HASH_TO_BKT(_he_thh->hashv, (tbl)->num_buckets * 2U, _he_bkt); \ - _he_newbkt = &(_he_new_buckets[_he_bkt]); \ - if (++(_he_newbkt->count) > (tbl)->ideal_chain_maxlen) { \ - (tbl)->nonideal_items++; \ - if (_he_newbkt->count > _he_newbkt->expand_mult * (tbl)->ideal_chain_maxlen) { \ - _he_newbkt->expand_mult++; \ - } \ - } \ - _he_thh->hh_prev = NULL; \ - _he_thh->hh_next = _he_newbkt->hh_head; \ - if (_he_newbkt->hh_head != NULL) { \ - _he_newbkt->hh_head->hh_prev = _he_thh; \ - } \ - _he_newbkt->hh_head = _he_thh; \ - _he_thh = _he_hh_nxt; \ - } \ - } \ - uthash_free((tbl)->buckets, (tbl)->num_buckets * sizeof(struct UT_hash_bucket)); \ - (tbl)->num_buckets *= 2U; \ - (tbl)->log2_num_buckets++; \ - (tbl)->buckets = _he_new_buckets; \ - (tbl)->ineff_expands = ((tbl)->nonideal_items > ((tbl)->num_items >> 1)) ? 
\ - ((tbl)->ineff_expands+1U) : 0U; \ - if ((tbl)->ineff_expands > 1U) { \ - (tbl)->noexpand = 1; \ - uthash_noexpand_fyi(tbl); \ - } \ - uthash_expand_fyi(tbl); \ - } \ -} while (0) - - -/* This is an adaptation of Simon Tatham's O(n log(n)) mergesort */ -/* Note that HASH_SORT assumes the hash handle name to be hh. - * HASH_SRT was added to allow the hash handle name to be passed in. */ -#define HASH_SORT(head,cmpfcn) HASH_SRT(hh,head,cmpfcn) -#define HASH_SRT(hh,head,cmpfcn) \ -do { \ - unsigned _hs_i; \ - unsigned _hs_looping,_hs_nmerges,_hs_insize,_hs_psize,_hs_qsize; \ - struct UT_hash_handle *_hs_p, *_hs_q, *_hs_e, *_hs_list, *_hs_tail; \ - if (head != NULL) { \ - _hs_insize = 1; \ - _hs_looping = 1; \ - _hs_list = &((head)->hh); \ - while (_hs_looping != 0U) { \ - _hs_p = _hs_list; \ - _hs_list = NULL; \ - _hs_tail = NULL; \ - _hs_nmerges = 0; \ - while (_hs_p != NULL) { \ - _hs_nmerges++; \ - _hs_q = _hs_p; \ - _hs_psize = 0; \ - for (_hs_i = 0; _hs_i < _hs_insize; ++_hs_i) { \ - _hs_psize++; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - if (_hs_q == NULL) { \ - break; \ - } \ - } \ - _hs_qsize = _hs_insize; \ - while ((_hs_psize != 0U) || ((_hs_qsize != 0U) && (_hs_q != NULL))) { \ - if (_hs_psize == 0U) { \ - _hs_e = _hs_q; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - _hs_qsize--; \ - } else if ((_hs_qsize == 0U) || (_hs_q == NULL)) { \ - _hs_e = _hs_p; \ - if (_hs_p != NULL) { \ - _hs_p = ((_hs_p->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ - } \ - _hs_psize--; \ - } else if ((cmpfcn( \ - DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_p)), \ - DECLTYPE(head)(ELMT_FROM_HH((head)->hh.tbl, _hs_q)) \ - )) <= 0) { \ - _hs_e = _hs_p; \ - if (_hs_p != NULL) { \ - _hs_p = ((_hs_p->next != NULL) ? 
\ - HH_FROM_ELMT((head)->hh.tbl, _hs_p->next) : NULL); \ - } \ - _hs_psize--; \ - } else { \ - _hs_e = _hs_q; \ - _hs_q = ((_hs_q->next != NULL) ? \ - HH_FROM_ELMT((head)->hh.tbl, _hs_q->next) : NULL); \ - _hs_qsize--; \ - } \ - if ( _hs_tail != NULL ) { \ - _hs_tail->next = ((_hs_e != NULL) ? \ - ELMT_FROM_HH((head)->hh.tbl, _hs_e) : NULL); \ - } else { \ - _hs_list = _hs_e; \ - } \ - if (_hs_e != NULL) { \ - _hs_e->prev = ((_hs_tail != NULL) ? \ - ELMT_FROM_HH((head)->hh.tbl, _hs_tail) : NULL); \ - } \ - _hs_tail = _hs_e; \ - } \ - _hs_p = _hs_q; \ - } \ - if (_hs_tail != NULL) { \ - _hs_tail->next = NULL; \ - } \ - if (_hs_nmerges <= 1U) { \ - _hs_looping = 0; \ - (head)->hh.tbl->tail = _hs_tail; \ - DECLTYPE_ASSIGN(head, ELMT_FROM_HH((head)->hh.tbl, _hs_list)); \ - } \ - _hs_insize *= 2U; \ - } \ - HASH_FSCK(hh, head, "HASH_SRT"); \ - } \ -} while (0) - -/* This function selects items from one hash into another hash. - * The end result is that the selected items have dual presence - * in both hashes. There is no copy of the items made; rather - * they are added into the new hash through a secondary hash - * hash handle that must be present in the structure. 
*/ -#define HASH_SELECT(hh_dst, dst, hh_src, src, cond) \ -do { \ - unsigned _src_bkt, _dst_bkt; \ - void *_last_elt = NULL, *_elt; \ - UT_hash_handle *_src_hh, *_dst_hh, *_last_elt_hh=NULL; \ - ptrdiff_t _dst_hho = ((char*)(&(dst)->hh_dst) - (char*)(dst)); \ - if ((src) != NULL) { \ - for (_src_bkt=0; _src_bkt < (src)->hh_src.tbl->num_buckets; _src_bkt++) { \ - for (_src_hh = (src)->hh_src.tbl->buckets[_src_bkt].hh_head; \ - _src_hh != NULL; \ - _src_hh = _src_hh->hh_next) { \ - _elt = ELMT_FROM_HH((src)->hh_src.tbl, _src_hh); \ - if (cond(_elt)) { \ - IF_HASH_NONFATAL_OOM( int _hs_oomed = 0; ) \ - _dst_hh = (UT_hash_handle*)(void*)(((char*)_elt) + _dst_hho); \ - _dst_hh->key = _src_hh->key; \ - _dst_hh->keylen = _src_hh->keylen; \ - _dst_hh->hashv = _src_hh->hashv; \ - _dst_hh->prev = _last_elt; \ - _dst_hh->next = NULL; \ - if (_last_elt_hh != NULL) { \ - _last_elt_hh->next = _elt; \ - } \ - if ((dst) == NULL) { \ - DECLTYPE_ASSIGN(dst, _elt); \ - HASH_MAKE_TABLE(hh_dst, dst, _hs_oomed); \ - IF_HASH_NONFATAL_OOM( \ - if (_hs_oomed) { \ - uthash_nonfatal_oom(_elt); \ - (dst) = NULL; \ - continue; \ - } \ - ) \ - } else { \ - _dst_hh->tbl = (dst)->hh_dst.tbl; \ - } \ - HASH_TO_BKT(_dst_hh->hashv, _dst_hh->tbl->num_buckets, _dst_bkt); \ - HASH_ADD_TO_BKT(_dst_hh->tbl->buckets[_dst_bkt], hh_dst, _dst_hh, _hs_oomed); \ - (dst)->hh_dst.tbl->num_items++; \ - IF_HASH_NONFATAL_OOM( \ - if (_hs_oomed) { \ - HASH_ROLLBACK_BKT(hh_dst, dst, _dst_hh); \ - HASH_DELETE_HH(hh_dst, dst, _dst_hh); \ - _dst_hh->tbl = NULL; \ - uthash_nonfatal_oom(_elt); \ - continue; \ - } \ - ) \ - HASH_BLOOM_ADD(_dst_hh->tbl, _dst_hh->hashv); \ - _last_elt = _elt; \ - _last_elt_hh = _dst_hh; \ - } \ - } \ - } \ - } \ - HASH_FSCK(hh_dst, dst, "HASH_SELECT"); \ -} while (0) - -#define HASH_CLEAR(hh,head) \ -do { \ - if ((head) != NULL) { \ - HASH_BLOOM_FREE((head)->hh.tbl); \ - uthash_free((head)->hh.tbl->buckets, \ - (head)->hh.tbl->num_buckets*sizeof(struct UT_hash_bucket)); \ - 
uthash_free((head)->hh.tbl, sizeof(UT_hash_table)); \ - (head) = NULL; \ - } \ -} while (0) - -#define HASH_OVERHEAD(hh,head) \ - (((head) != NULL) ? ( \ - (size_t)(((head)->hh.tbl->num_items * sizeof(UT_hash_handle)) + \ - ((head)->hh.tbl->num_buckets * sizeof(UT_hash_bucket)) + \ - sizeof(UT_hash_table) + \ - (HASH_BLOOM_BYTELEN))) : 0U) - -#ifdef NO_DECLTYPE -#define HASH_ITER(hh,head,el,tmp) \ -for(((el)=(head)), ((*(char**)(&(tmp)))=(char*)((head!=NULL)?(head)->hh.next:NULL)); \ - (el) != NULL; ((el)=(tmp)), ((*(char**)(&(tmp)))=(char*)((tmp!=NULL)?(tmp)->hh.next:NULL))) -#else -#define HASH_ITER(hh,head,el,tmp) \ -for(((el)=(head)), ((tmp)=DECLTYPE(el)((head!=NULL)?(head)->hh.next:NULL)); \ - (el) != NULL; ((el)=(tmp)), ((tmp)=DECLTYPE(el)((tmp!=NULL)?(tmp)->hh.next:NULL))) -#endif - -/* obtain a count of items in the hash */ -#define HASH_COUNT(head) HASH_CNT(hh,head) -#define HASH_CNT(hh,head) ((head != NULL)?((head)->hh.tbl->num_items):0U) - -typedef struct UT_hash_bucket { - struct UT_hash_handle *hh_head; - unsigned count; - - /* expand_mult is normally set to 0. In this situation, the max chain length - * threshold is enforced at its default value, HASH_BKT_CAPACITY_THRESH. (If - * the bucket's chain exceeds this length, bucket expansion is triggered). - * However, setting expand_mult to a non-zero value delays bucket expansion - * (that would be triggered by additions to this particular bucket) - * until its chain length reaches a *multiple* of HASH_BKT_CAPACITY_THRESH. - * (The multiplier is simply expand_mult+1). The whole idea of this - * multiplier is to reduce bucket expansions, since they are expensive, in - * situations where we know that a particular bucket tends to be overused. - * It is better to let its chain length grow to a longer yet-still-bounded - * value, than to do an O(n) bucket expansion too often. 
- */ - unsigned expand_mult; - -} UT_hash_bucket; - -/* random signature used only to find hash tables in external analysis */ -#define HASH_SIGNATURE 0xa0111fe1u -#define HASH_BLOOM_SIGNATURE 0xb12220f2u - -typedef struct UT_hash_table { - UT_hash_bucket *buckets; - unsigned num_buckets, log2_num_buckets; - unsigned num_items; - struct UT_hash_handle *tail; /* tail hh in app order, for fast append */ - ptrdiff_t hho; /* hash handle offset (byte pos of hash handle in element */ - - /* in an ideal situation (all buckets used equally), no bucket would have - * more than ceil(#items/#buckets) items. that's the ideal chain length. */ - unsigned ideal_chain_maxlen; - - /* nonideal_items is the number of items in the hash whose chain position - * exceeds the ideal chain maxlen. these items pay the penalty for an uneven - * hash distribution; reaching them in a chain traversal takes >ideal steps */ - unsigned nonideal_items; - - /* ineffective expands occur when a bucket doubling was performed, but - * afterward, more than half the items in the hash had nonideal chain - * positions. If this happens on two consecutive expansions we inhibit any - * further expansion, as it's not helping; this happens when the hash - * function isn't a good fit for the key domain. When expansion is inhibited - * the hash will still work, albeit no longer in constant time. 
*/ - unsigned ineff_expands, noexpand; - - uint32_t signature; /* used only to find hash tables in external analysis */ -#ifdef HASH_BLOOM - uint32_t bloom_sig; /* used only to test bloom exists in external analysis */ - uint8_t *bloom_bv; - uint8_t bloom_nbits; -#endif - -} UT_hash_table; - -typedef struct UT_hash_handle { - struct UT_hash_table *tbl; - void *prev; /* prev element in app order */ - void *next; /* next element in app order */ - struct UT_hash_handle *hh_prev; /* previous hh in bucket order */ - struct UT_hash_handle *hh_next; /* next hh in bucket order */ - const void *key; /* ptr to enclosing struct's key */ - unsigned keylen; /* enclosing struct's key len */ - unsigned hashv; /* result of hash-fcn(key) */ -} UT_hash_handle; - -#endif /* UTHASH_H */ diff --git a/solo-ckpool-source/src/utlist.h b/solo-ckpool-source/src/utlist.h deleted file mode 100644 index 48a8c7d..0000000 --- a/solo-ckpool-source/src/utlist.h +++ /dev/null @@ -1,757 +0,0 @@ -/* -Copyright (c) 2007-2014, Troy D. Hanson http://troydhanson.github.com/uthash/ -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - -#ifndef UTLIST_H -#define UTLIST_H - -#define UTLIST_VERSION 1.9.9 - -#include - -/* - * This file contains macros to manipulate singly and doubly-linked lists. - * - * 1. LL_ macros: singly-linked lists. - * 2. DL_ macros: doubly-linked lists. - * 3. CDL_ macros: circular doubly-linked lists. - * - * To use singly-linked lists, your structure must have a "next" pointer. - * To use doubly-linked lists, your structure must "prev" and "next" pointers. - * Either way, the pointer to the head of the list must be initialized to NULL. - * - * ----------------.EXAMPLE ------------------------- - * struct item { - * int id; - * struct item *prev, *next; - * } - * - * struct item *list = NULL: - * - * int main() { - * struct item *item; - * ... allocate and populate item ... - * DL_APPEND(list, item); - * } - * -------------------------------------------------- - * - * For doubly-linked lists, the append and delete macros are O(1) - * For singly-linked lists, append and delete are O(n) but prepend is O(1) - * The sort macro is O(n log(n)) for all types of single/double/circular lists. - */ - -/* These macros use decltype or the earlier __typeof GNU extension. - As decltype is only available in newer compilers (VS2010 or gcc 4.3+ - when compiling c++ code), this code uses whatever method is needed - or, for VS2008 where neither is available, uses casting workarounds. 
*/ -#ifdef _MSC_VER /* MS compiler */ -#if _MSC_VER >= 1600 && defined(__cplusplus) /* VS2010 or newer in C++ mode */ -#define LDECLTYPE(x) decltype(x) -#else /* VS2008 or older (or VS2010 in C mode) */ -#define NO_DECLTYPE -#define LDECLTYPE(x) char* -#endif -#elif defined(__ICCARM__) -#define NO_DECLTYPE -#define LDECLTYPE(x) char* -#else /* GNU, Sun and other compilers */ -#define LDECLTYPE(x) __typeof(x) -#endif - -/* for VS2008 we use some workarounds to get around the lack of decltype, - * namely, we always reassign our tmp variable to the list head if we need - * to dereference its prev/next pointers, and save/restore the real head.*/ -#ifdef NO_DECLTYPE -#define _SV(elt,list) _tmp = (char*)(list); {char **_alias = (char**)&(list); *_alias = (elt); } -#define _NEXT(elt,list,next) ((char*)((list)->next)) -#define _NEXTASGN(elt,list,to,next) { char **_alias = (char**)&((list)->next); *_alias=(char*)(to); } -/* #define _PREV(elt,list,prev) ((char*)((list)->prev)) */ -#define _PREVASGN(elt,list,to,prev) { char **_alias = (char**)&((list)->prev); *_alias=(char*)(to); } -#define _RS(list) { char **_alias = (char**)&(list); *_alias=_tmp; } -#define _CASTASGN(a,b) { char **_alias = (char**)&(a); *_alias=(char*)(b); } -#else -#define _SV(elt,list) -#define _NEXT(elt,list,next) ((elt)->next) -#define _NEXTASGN(elt,list,to,next) ((elt)->next)=(to) -/* #define _PREV(elt,list,prev) ((elt)->prev) */ -#define _PREVASGN(elt,list,to,prev) ((elt)->prev)=(to) -#define _RS(list) -#define _CASTASGN(a,b) (a)=(b) -#endif - -/****************************************************************************** - * The sort macro is an adaptation of Simon Tatham's O(n log(n)) mergesort * - * Unwieldy variable names used here to avoid shadowing passed-in variables. 
* - *****************************************************************************/ -#define LL_SORT(list, cmp) \ - LL_SORT2(list, cmp, next) - -#define LL_SORT2(list, cmp, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - _CASTASGN(_ls_p,list); \ - list = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list,next); _RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - } else if (_ls_qsize == 0 || !_ls_q) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - } else { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - } \ - if (_ls_tail) { \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ - } else { \ - _CASTASGN(list,_ls_e); \ - } \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - if (_ls_tail) { \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL,next); _RS(list); \ - } \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - - -#define DL_SORT(list, cmp) \ - DL_SORT2(list, cmp, prev, next) - -#define DL_SORT2(list, cmp, prev, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - 
int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - _CASTASGN(_ls_p,list); \ - list = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - _SV(_ls_q,list); _ls_q = _NEXT(_ls_q,list,next); _RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - } else if (_ls_qsize == 0 || !_ls_q) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - } else { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - } \ - if (_ls_tail) { \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ - } else { \ - _CASTASGN(list,_ls_e); \ - } \ - _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail,prev); _RS(list); \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - _CASTASGN(list->prev, _ls_tail); \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,NULL,next); _RS(list); \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - -#define CDL_SORT(list, cmp) \ - CDL_SORT2(list, cmp, prev, next) - -#define CDL_SORT2(list, cmp, prev, next) \ -do { \ - LDECLTYPE(list) _ls_p; \ - LDECLTYPE(list) _ls_q; \ - LDECLTYPE(list) _ls_e; \ - LDECLTYPE(list) _ls_tail; \ - LDECLTYPE(list) _ls_oldhead; \ - LDECLTYPE(list) _tmp; \ - int _ls_insize, _ls_nmerges, _ls_psize, _ls_qsize, _ls_i, _ls_looping; \ - if (list) { \ - _ls_insize = 1; \ - _ls_looping = 1; \ - while (_ls_looping) { \ - 
_CASTASGN(_ls_p,list); \ - _CASTASGN(_ls_oldhead,list); \ - list = NULL; \ - _ls_tail = NULL; \ - _ls_nmerges = 0; \ - while (_ls_p) { \ - _ls_nmerges++; \ - _ls_q = _ls_p; \ - _ls_psize = 0; \ - for (_ls_i = 0; _ls_i < _ls_insize; _ls_i++) { \ - _ls_psize++; \ - _SV(_ls_q,list); \ - if (_NEXT(_ls_q,list,next) == _ls_oldhead) { \ - _ls_q = NULL; \ - } else { \ - _ls_q = _NEXT(_ls_q,list,next); \ - } \ - _RS(list); \ - if (!_ls_q) break; \ - } \ - _ls_qsize = _ls_insize; \ - while (_ls_psize > 0 || (_ls_qsize > 0 && _ls_q)) { \ - if (_ls_psize == 0) { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ - } else if (_ls_qsize == 0 || !_ls_q) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ - } else if (cmp(_ls_p,_ls_q) <= 0) { \ - _ls_e = _ls_p; _SV(_ls_p,list); _ls_p = \ - _NEXT(_ls_p,list,next); _RS(list); _ls_psize--; \ - if (_ls_p == _ls_oldhead) { _ls_p = NULL; } \ - } else { \ - _ls_e = _ls_q; _SV(_ls_q,list); _ls_q = \ - _NEXT(_ls_q,list,next); _RS(list); _ls_qsize--; \ - if (_ls_q == _ls_oldhead) { _ls_q = NULL; } \ - } \ - if (_ls_tail) { \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_ls_e,next); _RS(list); \ - } else { \ - _CASTASGN(list,_ls_e); \ - } \ - _SV(_ls_e,list); _PREVASGN(_ls_e,list,_ls_tail,prev); _RS(list); \ - _ls_tail = _ls_e; \ - } \ - _ls_p = _ls_q; \ - } \ - _CASTASGN(list->prev,_ls_tail); \ - _CASTASGN(_tmp,list); \ - _SV(_ls_tail,list); _NEXTASGN(_ls_tail,list,_tmp,next); _RS(list); \ - if (_ls_nmerges <= 1) { \ - _ls_looping=0; \ - } \ - _ls_insize *= 2; \ - } \ - } \ -} while (0) - -/****************************************************************************** - * singly linked list macros (non-circular) * - *****************************************************************************/ -#define LL_PREPEND(head,add) \ - 
LL_PREPEND2(head,add,next) - -#define LL_PREPEND2(head,add,next) \ -do { \ - (add)->next = head; \ - head = add; \ -} while (0) - -#define LL_CONCAT(head1,head2) \ - LL_CONCAT2(head1,head2,next) - -#define LL_CONCAT2(head1,head2,next) \ -do { \ - LDECLTYPE(head1) _tmp; \ - if (head1) { \ - _tmp = head1; \ - while (_tmp->next) { _tmp = _tmp->next; } \ - _tmp->next=(head2); \ - } else { \ - (head1)=(head2); \ - } \ -} while (0) - -#define LL_APPEND(head,add) \ - LL_APPEND2(head,add,next) - -#define LL_APPEND2(head,add,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - (add)->next=NULL; \ - if (head) { \ - _tmp = head; \ - while (_tmp->next) { _tmp = _tmp->next; } \ - _tmp->next=(add); \ - } else { \ - (head)=(add); \ - } \ -} while (0) - -#define LL_DELETE(head,del) \ - LL_DELETE2(head,del,next) - -#define LL_DELETE2(head,del,next) \ -do { \ - LDECLTYPE(head) _tmp; \ - if ((head) == (del)) { \ - (head)=(head)->next; \ - } else { \ - _tmp = head; \ - while (_tmp->next && (_tmp->next != (del))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = ((del)->next); \ - } \ - } \ -} while (0) - -/* Here are VS2008 replacements for LL_APPEND and LL_DELETE */ -#define LL_APPEND_VS2008(head,add) \ - LL_APPEND2_VS2008(head,add,next) - -#define LL_APPEND2_VS2008(head,add,next) \ -do { \ - if (head) { \ - (add)->next = head; /* use add->next as a temp variable */ \ - while ((add)->next->next) { (add)->next = (add)->next->next; } \ - (add)->next->next=(add); \ - } else { \ - (head)=(add); \ - } \ - (add)->next=NULL; \ -} while (0) - -#define LL_DELETE_VS2008(head,del) \ - LL_DELETE2_VS2008(head,del,next) - -#define LL_DELETE2_VS2008(head,del,next) \ -do { \ - if ((head) == (del)) { \ - (head)=(head)->next; \ - } else { \ - char *_tmp = (char*)(head); \ - while ((head)->next && ((head)->next != (del))) { \ - head = (head)->next; \ - } \ - if ((head)->next) { \ - (head)->next = ((del)->next); \ - } \ - { \ - char **_head_alias = (char**)&(head); \ - *_head_alias = _tmp; \ 
- } \ - } \ -} while (0) -#ifdef NO_DECLTYPE -#undef LL_APPEND -#define LL_APPEND LL_APPEND_VS2008 -#undef LL_DELETE -#define LL_DELETE LL_DELETE_VS2008 -#undef LL_DELETE2 -#define LL_DELETE2 LL_DELETE2_VS2008 -#undef LL_APPEND2 -#define LL_APPEND2 LL_APPEND2_VS2008 -#undef LL_CONCAT /* no LL_CONCAT_VS2008 */ -#undef DL_CONCAT /* no DL_CONCAT_VS2008 */ -#endif -/* end VS2008 replacements */ - -#define LL_COUNT(head,el,counter) \ - LL_COUNT2(head,el,counter,next) \ - -#define LL_COUNT2(head,el,counter,next) \ -{ \ - counter = 0; \ - LL_FOREACH2(head,el,next){ ++counter; } \ -} - -#define LL_FOREACH(head,el) \ - LL_FOREACH2(head,el,next) - -#define LL_FOREACH2(head,el,next) \ - for(el=head;el;el=(el)->next) - -#define LL_FOREACH_SAFE(head,el,tmp) \ - LL_FOREACH_SAFE2(head,el,tmp,next) - -#define LL_FOREACH_SAFE2(head,el,tmp,next) \ - for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) - -#define LL_SEARCH_SCALAR(head,out,field,val) \ - LL_SEARCH_SCALAR2(head,out,field,val,next) - -#define LL_SEARCH_SCALAR2(head,out,field,val,next) \ -do { \ - LL_FOREACH2(head,out,next) { \ - if ((out)->field == (val)) break; \ - } \ -} while(0) - -#define LL_SEARCH(head,out,elt,cmp) \ - LL_SEARCH2(head,out,elt,cmp,next) - -#define LL_SEARCH2(head,out,elt,cmp,next) \ -do { \ - LL_FOREACH2(head,out,next) { \ - if ((cmp(out,elt))==0) break; \ - } \ -} while(0) - -#define LL_REPLACE_ELEM(head, el, add) \ -do { \ - LDECLTYPE(head) _tmp; \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - (add)->next = (el)->next; \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - _tmp = head; \ - while (_tmp->next && (_tmp->next != (el))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = (add); \ - } \ - } \ -} while (0) - -#define LL_PREPEND_ELEM(head, el, add) \ -do { \ - LDECLTYPE(head) _tmp; \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - (add)->next = (el); \ - if ((head) == (el)) { \ - (head) = (add); \ 
- } else { \ - _tmp = head; \ - while (_tmp->next && (_tmp->next != (el))) { \ - _tmp = _tmp->next; \ - } \ - if (_tmp->next) { \ - _tmp->next = (add); \ - } \ - } \ -} while (0) \ - - -/****************************************************************************** - * doubly linked list macros (non-circular) * - *****************************************************************************/ -#define DL_PREPEND(head,add) \ - DL_PREPEND2(head,add,prev,next) - -#define DL_PREPEND2(head,add,prev,next) \ -do { \ - (add)->next = head; \ - if (head) { \ - (add)->prev = (head)->prev; \ - (head)->prev = (add); \ - } else { \ - (add)->prev = (add); \ - } \ - (head) = (add); \ -} while (0) - -#define DL_APPEND(head,add) \ - DL_APPEND2(head,add,prev,next) - -#define DL_APPEND2(head,add,prev,next) \ -do { \ - if (head) { \ - (add)->prev = (head)->prev; \ - (head)->prev->next = (add); \ - (head)->prev = (add); \ - (add)->next = NULL; \ - } else { \ - (head)=(add); \ - (head)->prev = (head); \ - (head)->next = NULL; \ - } \ -} while (0) - -#define DL_CONCAT(head1,head2) \ - DL_CONCAT2(head1,head2,prev,next) - -#define DL_CONCAT2(head1,head2,prev,next) \ -do { \ - LDECLTYPE(head1) _tmp; \ - if (head2) { \ - if (head1) { \ - _tmp = (head2)->prev; \ - (head2)->prev = (head1)->prev; \ - (head1)->prev->next = (head2); \ - (head1)->prev = _tmp; \ - } else { \ - (head1)=(head2); \ - } \ - } \ -} while (0) - -#define DL_DELETE(head,del) \ - DL_DELETE2(head,del,prev,next) - -#define DL_DELETE2(head,del,prev,next) \ -do { \ - assert((del)->prev != NULL); \ - if ((del)->prev == (del)) { \ - (head)=NULL; \ - } else if ((del)==(head)) { \ - (del)->next->prev = (del)->prev; \ - (head) = (del)->next; \ - } else { \ - (del)->prev->next = (del)->next; \ - if ((del)->next) { \ - (del)->next->prev = (del)->prev; \ - } else { \ - (head)->prev = (del)->prev; \ - } \ - } \ -} while (0) - -#define DL_COUNT(head,el,counter) \ - DL_COUNT2(head,el,counter,next) \ - -#define DL_COUNT2(head,el,counter,next) 
\ -{ \ - counter = 0; \ - DL_FOREACH2(head,el,next){ ++counter; } \ -} - -#define DL_FOREACH(head,el) \ - DL_FOREACH2(head,el,next) - -#define DL_FOREACH2(head,el,next) \ - for(el=head;el;el=(el)->next) - -/* this version is safe for deleting the elements during iteration */ -#define DL_FOREACH_SAFE(head,el,tmp) \ - DL_FOREACH_SAFE2(head,el,tmp,next) - -#define DL_FOREACH_SAFE2(head,el,tmp,next) \ - for((el)=(head);(el) && (tmp = (el)->next, 1); (el) = tmp) - -/* these are identical to their singly-linked list counterparts */ -#define DL_SEARCH_SCALAR LL_SEARCH_SCALAR -#define DL_SEARCH LL_SEARCH -#define DL_SEARCH_SCALAR2 LL_SEARCH_SCALAR2 -#define DL_SEARCH2 LL_SEARCH2 - -#define DL_REPLACE_ELEM(head, el, add) \ -do { \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - if ((head) == (el)) { \ - (head) = (add); \ - (add)->next = (el)->next; \ - if ((el)->next == NULL) { \ - (add)->prev = (add); \ - } else { \ - (add)->prev = (el)->prev; \ - (add)->next->prev = (add); \ - } \ - } else { \ - (add)->next = (el)->next; \ - (add)->prev = (el)->prev; \ - (add)->prev->next = (add); \ - if ((el)->next == NULL) { \ - (head)->prev = (add); \ - } else { \ - (add)->next->prev = (add); \ - } \ - } \ -} while (0) - -#define DL_PREPEND_ELEM(head, el, add) \ -do { \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - (add)->next = (el); \ - (add)->prev = (el)->prev; \ - (el)->prev = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } else { \ - (add)->prev->next = (add); \ - } \ -} while (0) \ - - -/****************************************************************************** - * circular doubly linked list macros * - *****************************************************************************/ -#define CDL_PREPEND(head,add) \ - CDL_PREPEND2(head,add,prev,next) - -#define CDL_PREPEND2(head,add,prev,next) \ -do { \ - if (head) { \ - (add)->prev = (head)->prev; \ - (add)->next = (head); \ - (head)->prev = (add); \ - 
(add)->prev->next = (add); \ - } else { \ - (add)->prev = (add); \ - (add)->next = (add); \ - } \ -(head)=(add); \ -} while (0) - -#define CDL_DELETE(head,del) \ - CDL_DELETE2(head,del,prev,next) - -#define CDL_DELETE2(head,del,prev,next) \ -do { \ - if ( ((head)==(del)) && ((head)->next == (head))) { \ - (head) = 0L; \ - } else { \ - (del)->next->prev = (del)->prev; \ - (del)->prev->next = (del)->next; \ - if ((del) == (head)) (head)=(del)->next; \ - } \ -} while (0) - -#define CDL_COUNT(head,el,counter) \ - CDL_COUNT2(head,el,counter,next) \ - -#define CDL_COUNT2(head, el, counter,next) \ -{ \ - counter = 0; \ - CDL_FOREACH2(head,el,next){ ++counter; } \ -} - -#define CDL_FOREACH(head,el) \ - CDL_FOREACH2(head,el,next) - -#define CDL_FOREACH2(head,el,next) \ - for(el=head;el;el=((el)->next==head ? 0L : (el)->next)) - -#define CDL_FOREACH_SAFE(head,el,tmp1,tmp2) \ - CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) - -#define CDL_FOREACH_SAFE2(head,el,tmp1,tmp2,prev,next) \ - for((el)=(head), ((tmp1)=(head)?((head)->prev):NULL); \ - (el) && ((tmp2)=(el)->next, 1); \ - ((el) = (((el)==(tmp1)) ? 
0L : (tmp2)))) - -#define CDL_SEARCH_SCALAR(head,out,field,val) \ - CDL_SEARCH_SCALAR2(head,out,field,val,next) - -#define CDL_SEARCH_SCALAR2(head,out,field,val,next) \ -do { \ - CDL_FOREACH2(head,out,next) { \ - if ((out)->field == (val)) break; \ - } \ -} while(0) - -#define CDL_SEARCH(head,out,elt,cmp) \ - CDL_SEARCH2(head,out,elt,cmp,next) - -#define CDL_SEARCH2(head,out,elt,cmp,next) \ -do { \ - CDL_FOREACH2(head,out,next) { \ - if ((cmp(out,elt))==0) break; \ - } \ -} while(0) - -#define CDL_REPLACE_ELEM(head, el, add) \ -do { \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - if ((el)->next == (el)) { \ - (add)->next = (add); \ - (add)->prev = (add); \ - (head) = (add); \ - } else { \ - (add)->next = (el)->next; \ - (add)->prev = (el)->prev; \ - (add)->next->prev = (add); \ - (add)->prev->next = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } \ - } \ -} while (0) - -#define CDL_PREPEND_ELEM(head, el, add) \ -do { \ - assert(head != NULL); \ - assert(el != NULL); \ - assert(add != NULL); \ - (add)->next = (el); \ - (add)->prev = (el)->prev; \ - (el)->prev = (add); \ - (add)->prev->next = (add); \ - if ((head) == (el)) { \ - (head) = (add); \ - } \ -} while (0) \ - -#endif /* UTLIST_H */ - diff --git a/solo-ckpool.dockerfile b/solo-ckpool.dockerfile index 69acac8..00a9a7e 100644 --- a/solo-ckpool.dockerfile +++ b/solo-ckpool.dockerfile @@ -17,11 +17,12 @@ RUN apt-get update || true && \ libtool \ && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Copy pre-cloned solo-ckpool source +# Clone solo-ckpool with fractional difficulty support WORKDIR /build -COPY solo-ckpool-source/ ckpool-solo/ +RUN git clone https://github.com/xyephy/solo-ckpool.git ckpool-solo WORKDIR /build/ckpool-solo +RUN git checkout solobtc # Build ckpool-solo RUN ./autogen.sh && \ From 7c42000661711283d88f2f5c83800d6eaff2153f Mon Sep 17 00:00:00 2001 From: xyephy Date: Tue, 8 Jul 2025 19:37:52 +0300 Subject: [PATCH 3/4] Add 
shares_per_minute configuration support to solo-ckpool integration - Enhanced solo-ckpool.dockerfile to use master branch with shares_per_minute feature - Updated configuration with 15.0 shares per minute for benchmarking consistency - Added comprehensive documentation for the new feature - Fixed SSL certificate verification issue in Docker build - Enables standardized share rates for fair SV1 vs SV2 performance comparisons Testing validated: - 5 spm: Low-latency scenarios (1 share every 12 seconds) - 15 spm: Balanced benchmarking (1 share every 4 seconds) - 30 spm: High-throughput testing (1 share every 2 seconds) - Backward compatibility maintained with 18.0 spm default --- SOLO_CKPOOL_MODIFICATIONS.md | 35 +++++++++++++++++++++++--- custom-configs/solo-ckpool/ckpool.conf | 1 + solo-ckpool.dockerfile | 6 ++--- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/SOLO_CKPOOL_MODIFICATIONS.md b/SOLO_CKPOOL_MODIFICATIONS.md index 1c5856e..69be0d5 100644 --- a/SOLO_CKPOOL_MODIFICATIONS.md +++ b/SOLO_CKPOOL_MODIFICATIONS.md @@ -123,15 +123,44 @@ All existing SV1 metrics are collected: **Configuration Examples**: ```json { - "mindiff": 0.1, // Minimum difficulty of 0.1 (fractional) - "startdiff": 0.5, // Starting difficulty of 0.5 (fractional) - "maxdiff": 100.0 // Maximum difficulty of 100.0 + "mindiff": 0.1, // Minimum difficulty of 0.1 (fractional) + "startdiff": 0.5, // Starting difficulty of 0.5 (fractional) + "maxdiff": 100.0, // Maximum difficulty of 100.0 + "shares_per_minute": 15.0 // Target 15 shares per minute for consistent benchmarking } ``` **Backward Compatibility**: - Integer values still work: `"mindiff": 1, "startdiff": 42` - Mixed configurations supported: `"mindiff": 0.5, "startdiff": 10` +- Default shares_per_minute: 18.0 (maintains existing behavior if not specified) + +### Shares Per Minute Control ✅ + +**NEW**: Solo-ckpool now supports configurable share submission rates for standardized benchmarking. 
+ +**Key Benefits**: +- ✅ **Consistent benchmarking**: Set identical share rates for SV1 and SV2 comparisons +- ✅ **Flexible testing scenarios**: Easily adjust load characteristics +- ✅ **Real-time adjustment**: Variable difficulty adapts to maintain target rate +- ✅ **Backward compatibility**: Default 18.0 spm maintains existing behavior + +**Configuration Examples for Different Scenarios**: +```json +// Low-latency testing +{"shares_per_minute": 5.0} // 1 share every 12 seconds + +// Balanced benchmarking +{"shares_per_minute": 15.0} // 1 share every 4 seconds + +// High-throughput testing +{"shares_per_minute": 30.0} // 1 share every 2 seconds +``` + +**Algorithm Enhancement**: +- Dynamic difficulty adjustment based on `shares_per_minute / 60.0` target rate +- Proportional hysteresis bounds to prevent oscillation +- Configurable timing thresholds based on target share rate ## Limitations and Considerations diff --git a/custom-configs/solo-ckpool/ckpool.conf b/custom-configs/solo-ckpool/ckpool.conf index 76bc53c..2abc8d6 100644 --- a/custom-configs/solo-ckpool/ckpool.conf +++ b/custom-configs/solo-ckpool/ckpool.conf @@ -8,6 +8,7 @@ "mindiff": 0.1, "startdiff": 0.5, "maxdiff": 0.0, + "shares_per_minute": 15.0, "btcaddress": "tb1qa0sm0hxzj0x25rh8gw5xlzwlsfvvyz8u96w3p8", "serverurl": ["0.0.0.0:3333"], "logdir": "/var/log/ckpool", diff --git a/solo-ckpool.dockerfile b/solo-ckpool.dockerfile index 00a9a7e..0717c5c 100644 --- a/solo-ckpool.dockerfile +++ b/solo-ckpool.dockerfile @@ -17,12 +17,12 @@ RUN apt-get update || true && \ libtool \ && apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* -# Clone solo-ckpool with fractional difficulty support +# Clone solo-ckpool with fractional difficulty and shares_per_minute support WORKDIR /build -RUN git clone https://github.com/xyephy/solo-ckpool.git ckpool-solo +RUN git config --global http.sslverify false && \ + git clone https://github.com/xyephy/solo-ckpool.git ckpool-solo WORKDIR /build/ckpool-solo -RUN git 
checkout solobtc # Build ckpool-solo RUN ./autogen.sh && \ From b2ad082d4bddaf15fe76b706fccb31296eb64787 Mon Sep 17 00:00:00 2001 From: xyephy Date: Tue, 8 Jul 2025 19:54:31 +0300 Subject: [PATCH 4/4] Update Dockerfile to reference solobtc branch for shares_per_minute feature --- solo-ckpool.dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solo-ckpool.dockerfile b/solo-ckpool.dockerfile index 0717c5c..2e34748 100644 --- a/solo-ckpool.dockerfile +++ b/solo-ckpool.dockerfile @@ -20,7 +20,7 @@ RUN apt-get update || true && \ # Clone solo-ckpool with fractional difficulty and shares_per_minute support WORKDIR /build RUN git config --global http.sslverify false && \ - git clone https://github.com/xyephy/solo-ckpool.git ckpool-solo + git clone -b solobtc https://github.com/xyephy/solo-ckpool.git ckpool-solo WORKDIR /build/ckpool-solo