#include "../include/odhtdb/Database.hpp" #include "../include/odhtdb/Group.hpp" #include "../include/odhtdb/LocalUser.hpp" #include "../include/odhtdb/RemoteUser.hpp" #include "../include/odhtdb/Encryption.hpp" #include "../include/odhtdb/DhtKey.hpp" #include "../include/odhtdb/bin2hex.hpp" #include "../include/odhtdb/Log.hpp" #include #include #include #include #include #include #include #include #include using namespace dht; using namespace std; using namespace chrono_literals; static int databaseCount = 0; // TODO: Verify time_t is always signed static time_t timeOffset = 0; // Updated by comparing local time with ntp server static odhtdb::u64 timeOffsetFraction = 0; static thread *ntpThread = nullptr; static bool timestampSynced = false; static InfoHash CREATE_DATA_HASH = InfoHash::get("__odhtdb__.create_data"); static InfoHash ADD_DATA_HASH = InfoHash::get("__odhtdb__.add_data"); const int OPENDHT_INFOHASH_LEN = 20; namespace odhtdb { const u16 DATABASE_CREATE_PACKET_STRUCTURE_VERSION = 1; const u16 DATABASE_ADD_PACKET_STRUCTURE_VERSION = 1; class RequestQuarantineException : public runtime_error { public: RequestQuarantineException() : runtime_error("Request quarantine, will be processed later (can be real of fake request)") {} }; DataView combine(sibs::SafeSerializer &headerSerializer, const Encryption &encryptedData) { usize allocationSize = headerSerializer.getBuffer().size() + encryptedData.getNonce().size + encryptedData.getCipherText().size; char *result = new char[allocationSize]; memcpy(result, headerSerializer.getBuffer().data(), headerSerializer.getBuffer().size()); memcpy(result + headerSerializer.getBuffer().size(), encryptedData.getNonce().data, encryptedData.getNonce().size); memcpy(result + headerSerializer.getBuffer().size() + encryptedData.getNonce().size, encryptedData.getCipherText().data, encryptedData.getCipherText().size); return DataView(result, allocationSize); } DataView combine(const Signature::PublicKey &publicKey, const string &signedEncryptedData) { usize allocationSize = publicKey.getSize() + signedEncryptedData.size(); char *result = new char[allocationSize]; memcpy(result, publicKey.getData(), publicKey.getSize()); memcpy(result + publicKey.getSize(), signedEncryptedData.data(), signedEncryptedData.size()); return DataView(result, allocationSize); } DatabaseCreateResponse::DatabaseCreateResponse(LocalUser *_nodeAdminUser, const shared_ptr &_key, const shared_ptr &_hash) : nodeAdminUser(_nodeAdminUser), key(_key), hash(_hash) { } const LocalUser* DatabaseCreateResponse::getNodeAdminUser() const { return nodeAdminUser; } const shared_ptr DatabaseCreateResponse::getNodeEncryptionKey() const { return key; } const shared_ptr DatabaseCreateResponse::getRequestHash() const { return hash; } Database::Database(const char *bootstrapNodeAddr, u16 port, const boost::filesystem::path &storageDir) : onCreateNodeCallbackFunc(nullptr), onAddNodeCallbackFunc(nullptr), onAddUserCallbackFunc(nullptr), databaseStorage(this, storageDir) { node.run(port , { /*.dht_config = */{ /*.node_config = */{ /*.node_id = */{}, /*.network = */0, /*.is_bootstrap = */false, /*.maintain_storage*/false }, /*.id = */databaseStorage.getIdentity() }, /*.threaded = */true, /*.proxy_server = */"", /*.push_node_id = */"" }); auto portStr = to_string(port); node.bootstrap(bootstrapNodeAddr, portStr.c_str()); // TODO: Make this work for multiple threads initializing database at same time ++databaseCount; if(databaseCount == 1) { if(ntpThread) delete ntpThread; const int ntpFetchTimestampRetries = 
            ntpThread = new thread([]()
            {
                ntp::NtpClient ntpClient("pool.ntp.org", 10);
                while(databaseCount > 0)
                {
                    int fetchRetryCounter = 0;
                    while(fetchRetryCounter < ntpFetchTimestampRetries)
                    {
                        try
                        {
                            ntp::NtpTimestamp ntpTimestamp = ntpClient.getTimestamp();
                            struct timeval currentLocalTime;
                            gettimeofday(&currentLocalTime, NULL);
                            timeOffset = currentLocalTime.tv_sec - ntpTimestamp.seconds;
                            timeOffsetFraction = currentLocalTime.tv_usec - ntpTimestamp.fractions;
                            timestampSynced = true;
                            break;
                        }
                        catch(ntp::NtpClientException &e)
                        {
                            Log::warn("Failed to sync clock with ntp server, reason: %s. Try #%d", e.what(), fetchRetryCounter);
                            this_thread::sleep_for(500ms);
                        }
                        ++fetchRetryCounter;
                    }

                    if(fetchRetryCounter == ntpFetchTimestampRetries)
                        throw ntp::NtpClientException("Failed to retrieve ntp timestamp after several retries");

                    this_thread::sleep_for(60s);
                }
                timestampSynced = false;
            });
            ntpThread->detach();
        }
    }

    Database::~Database()
    {
        // TODO: Make this work for multiple threads removing database object at same time
        --databaseCount;
        node.join();
    }

    void Database::seed(const DatabaseNode &nodeToSeed)
    {
        if(seedInfoMap.find(*nodeToSeed.getRequestHash()) != seedInfoMap.end())
        {
            Log::warn("You are already seeding node %s, ignoring...", nodeToSeed.getRequestHash()->toString().c_str());
            return;
        }

        DatabaseSeedInfo newSeedInfo;

        // TODO: Use cached files and seed those. If none exists, request new files to seed.
        // If nobody requests my cached files in a long time, request new files to seed and remove cached files
        // (only if there are plenty of other seeders for the cached files. This could also cause race issue
        // where all nodes with a cached file delete it at same time).
        databaseStorage.setNodeDecryptionKey(*nodeToSeed.getRequestHash(), DataView(nodeToSeed.getNodeEncryptionKey()->data, nodeToSeed.getNodeEncryptionKey()->size));

        Log::debug("Seeding key: %s", nodeToSeed.getRequestHash()->toString().c_str());
        DhtKey dhtKey(*nodeToSeed.getRequestHash());

        auto newDataListenerFuture = node.listen(dhtKey.getNewDataListenerKey(), [this, nodeToSeed](const shared_ptr<Value> &value)
        {
            Log::debug("Seed: New data listener received data...");
            const Hash requestHash(value->data.data(), value->data.size());
            if(requestHash == *nodeToSeed.getRequestHash())
                return true; //return listenCreateData(value, requestHash, encryptionKey);
            else
                return listenAddData(value, requestHash, nodeToSeed.getRequestHash(), nodeToSeed.getNodeEncryptionKey());
        });
        newSeedInfo.newDataListenerFuture = make_shared<future<size_t>>(move(newDataListenerFuture));

        u8 responseKey[OPENDHT_INFOHASH_LEN];
        randombytes_buf(responseKey, OPENDHT_INFOHASH_LEN);
        newSeedInfo.reponseKeyInfoHash = make_shared<InfoHash>(responseKey, OPENDHT_INFOHASH_LEN);

        // TODO: If this response key is spammed, generate a new one.
        auto responseKeyFuture = node.listen(*newSeedInfo.reponseKeyInfoHash, [this, nodeToSeed](const shared_ptr<Value> &value)
        {
            const Hash requestHash(value->data.data(), value->data.size());
            if(requestHash == *nodeToSeed.getRequestHash())
                return listenCreateData(value, requestHash, nodeToSeed.getNodeEncryptionKey());
            else
                return listenAddData(value, requestHash, nodeToSeed.getRequestHash(), nodeToSeed.getNodeEncryptionKey());
        });
        newSeedInfo.responseKeyFuture = make_shared<future<size_t>>(move(responseKeyFuture));

        // TODO: Before listening on this key, we should check how many remote peers are also providing this data.
        // This is to prevent too many peers from responding to a request to get old data.
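        // Old-data listener: peers that want history for this node put a request on the node's
        // "request old data" key, containing a start timestamp and a one-time response key.
        // The listener below answers by putting the stored 'create' packet (when the start
        // timestamp is 0) and all stored 'add' packets onto that response key.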
        auto requestOldDataListenerFuture = node.listen(dhtKey.getRequestOldDataKey(), [this, nodeToSeed](const shared_ptr<Value> &value)
        {
            Log::debug("Request: Got request to send old data");
            try
            {
                sibs::SafeDeserializer deserializer(value->data.data(), value->data.size());
                u64 dataStartTimestamp = deserializer.extract<u64>();
                u8 requestResponseKey[OPENDHT_INFOHASH_LEN];
                deserializer.extract(requestResponseKey, OPENDHT_INFOHASH_LEN);

                auto requestedData = databaseStorage.getStorage(*nodeToSeed.getRequestHash());
                if(!requestedData)
                {
                    Log::debug("No data found for hash %s, unable to serve peer", nodeToSeed.getRequestHash()->toString().c_str());
                    return true;
                }

                InfoHash requestResponseInfoHash(requestResponseKey, OPENDHT_INFOHASH_LEN);
                if(dataStartTimestamp == 0)
                {
                    Log::debug("Request: Sent create packet to requesting peer");
                    node.put(requestResponseInfoHash, Value((u8*)requestedData->data.data, requestedData->data.size), [](bool ok)
                    {
                        if(!ok)
                            Log::error("Failed to put response for old data for 'create' data");
                    });
                }

                for(auto requestedObject : requestedData->objects)
                {
                    node.put(requestResponseInfoHash, Value((u8*)requestedObject->data.data, requestedObject->data.size), [](bool ok)
                    {
                        if(!ok)
                            Log::error("Failed to put response for old data for 'add' data");
                    });
                }
            }
            catch (sibs::DeserializeException &e)
            {
                Log::warn("Failed to deserialize 'get old data' request: %s", e.what());
            }
            return true;
        });
        newSeedInfo.requestOldDataListenerFuture = make_shared<future<size_t>>(move(requestOldDataListenerFuture));

        seedInfoMap[*nodeToSeed.getRequestHash()] = newSeedInfo;

        sibs::SafeSerializer serializer;
        serializer.add((u64)0); // Timestamp in microseconds, fetch data newer than this.
        // TODO: Get timestamp from database storage
        serializer.add(responseKey, OPENDHT_INFOHASH_LEN);
        node.put(dhtKey.getRequestOldDataKey(), Value(serializer.getBuffer().data(), serializer.getBuffer().size()), [](bool ok)
        {
            if(!ok)
                Log::warn("Failed to put request to get old data");
        });

        //node.listen(CREATE_DATA_HASH, bind(&Database::listenCreateData, this, _1));
        //node.listen(ADD_DATA_HASH, bind(&Database::listenAddData, this, _1));
    }

    void Database::stopSeeding(const Hash &nodeHash)
    {
        auto seedInfoIt = seedInfoMap.find(nodeHash);
        if(seedInfoIt != seedInfoMap.end())
        {
            // TODO: Verify if doing get on listener future stalls program forever... Opendht documentation is not clear on this
            DhtKey dhtKey(nodeHash);
            node.cancelListen(dhtKey.getNewDataListenerKey(), seedInfoIt->second.newDataListenerFuture->get());
            node.cancelListen(dhtKey.getRequestOldDataKey(), seedInfoIt->second.requestOldDataListenerFuture->get());
            node.cancelListen(*seedInfoIt->second.reponseKeyInfoHash, seedInfoIt->second.responseKeyFuture->get());
            seedInfoMap.erase(seedInfoIt);
        }
    }

    unique_ptr<DatabaseCreateResponse> Database::create(const string &ownerName, const string &nodeName)
    {
        return create(ownerName, Signature::KeyPair(), nodeName);
    }

    unique_ptr<DatabaseCreateResponse> Database::create(const string &ownerName, const Signature::KeyPair &keyPair, const string &nodeName)
    {
        // TODO: Should this be declared static? is there any difference in behavior/performance?
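        // A 'create' packet is a plaintext header (packet structure version, creation
        // timestamp, admin public key, admin group id) followed by the encrypted body
        // (owner name and node name) appended as nonce + ciphertext. The packet's hash
        // is used as the node's request hash.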
        boost::uuids::random_generator uuidGen;
        auto adminGroupId = uuidGen();
        auto adminGroup = new Group("administrator", adminGroupId.data, ADMIN_PERMISSION);

        LocalUser *nodeAdminUser = LocalUser::create(keyPair, ownerName, adminGroup);

        // Header
        sibs::SafeSerializer serializer;
        serializer.add(DATABASE_CREATE_PACKET_STRUCTURE_VERSION); // Packet structure version
        u64 timestampCombined = getSyncedTimestampUtc().getCombined();
        serializer.add(timestampCombined);
        serializer.add((u8*)nodeAdminUser->getPublicKey().getData(), PUBLIC_KEY_NUM_BYTES);
        serializer.add(adminGroupId.data, adminGroupId.size());

        // Encrypted body
        sibs::SafeSerializer encryptedSerializer;
        assert(nodeAdminUser->getName().size() <= 255);
        encryptedSerializer.add((u8)nodeAdminUser->getName().size());
        encryptedSerializer.add((u8*)nodeAdminUser->getName().data(), nodeAdminUser->getName().size());
        assert(nodeName.size() <= 255);
        encryptedSerializer.add((u8)nodeName.size());
        encryptedSerializer.add((u8*)nodeName.data(), nodeName.size());

        try
        {
            Encryption encryptedBody(DataView(encryptedSerializer.getBuffer().data(), encryptedSerializer.getBuffer().size()));
            DataView requestData = combine(serializer, encryptedBody);
            shared_ptr<Hash> hashRequestKey = make_shared<Hash>(requestData.data, requestData.size);
            databaseStorage.setNodeDecryptionKey(*hashRequestKey, DataView(encryptedBody.getKey().data, encryptedBody.getKey().size));
            databaseStorage.createStorage(*hashRequestKey, adminGroup, timestampCombined, (const u8*)requestData.data, requestData.size, serializer.getBuffer().size());

            stagedCreateObjects.emplace_back(make_unique(requestData, hashRequestKey));

            assert(encryptedBody.getKey().size == ENCRYPTION_KEY_BYTE_SIZE);
            auto key = make_shared<OwnedMemory>(new char[encryptedBody.getKey().size], encryptedBody.getKey().size);
            memcpy(key->data, encryptedBody.getKey().data, encryptedBody.getKey().size);
            return make_unique<DatabaseCreateResponse>(nodeAdminUser, move(key), hashRequestKey);
        }
        catch (EncryptionException &e)
        {
            throw DatabaseCreateException("Failed to encrypt data for 'create' request");
        }
    }

    void Database::addData(const DatabaseNode &nodeInfo, const LocalUser *userToPerformActionWith, DataView dataToAdd)
    {
        if(!userToPerformActionWith->isAllowedToPerformAction(PermissionType::ADD_DATA))
        {
            // TODO: User might have permission to perform operation, but we haven't got the packet that adds user to the group with the permission,
            // or we haven't received the packet that modifies group with the permission to perform the operation.
            // This also means that an user can be in a group that has permission to perform the operation and then later be removed from it,
            // and remote peers would accept our request to perform operation if they haven't received the operation that removes the user from the group.
            // How to handle this?
string errMsg = "User "; errMsg += userToPerformActionWith->getName(); errMsg += " is not allowed to perform the operation: ADD_USER"; throw PermissionDeniedException(errMsg); } sibs::SafeSerializer serializer; serializer.add(DATABASE_ADD_PACKET_STRUCTURE_VERSION); u64 timestampCombined = getSyncedTimestampUtc().getCombined(); serializer.add(timestampCombined); serializer.add(DatabaseOperation::ADD_DATA); DataView encryptionKey(nodeInfo.getNodeEncryptionKey()->data, ENCRYPTION_KEY_BYTE_SIZE); Encryption encryptedBody(dataToAdd, DataView(), encryptionKey); DataView requestData = combine(serializer, encryptedBody); string signedRequestData = userToPerformActionWith->getPrivateKey().sign(requestData); DataView stagedAddObject = combine(userToPerformActionWith->getPublicKey(), signedRequestData); Hash requestDataHash(stagedAddObject.data, stagedAddObject.size); DataView encryptedDataView((char*)requestData.data + serializer.getBuffer().size(), requestData.size - serializer.getBuffer().size()); databaseStorage.appendStorage(*nodeInfo.getRequestHash(), requestDataHash, DatabaseOperation::ADD_DATA, userToPerformActionWith, timestampCombined, (u8*)stagedAddObject.data, stagedAddObject.size, encryptedDataView); delete[] (char*)requestData.data; stagedAddObjects.emplace_back(make_unique(stagedAddObject, nodeInfo.getRequestHash())); } Group* getGroupWithRightsToAddUserToGroup(const vector &groups, Group *groupToAddUserTo) { for(auto group : groups) { const auto &groupPermission = group->getPermission(); if(groupPermission.getFlag(PermissionType::ADD_USER_LOWER_LEVEL) && groupPermission.getPermissionLevel() < groupToAddUserTo->getPermission().getPermissionLevel()) { return group; } else if(groupPermission.getFlag(PermissionType::ADD_USER_SAME_LEVEL) && groupPermission.getPermissionLevel() == groupToAddUserTo->getPermission().getPermissionLevel()) { return group; } } return nullptr; } void Database::addUser(const DatabaseNode &nodeInfo, const LocalUser *userToPerformActionWith, const string &userToAddName, const Signature::PublicKey &userToAddPublicKey, Group *groupToAddUserTo) { auto groupWithAddUserRights = getGroupWithRightsToAddUserToGroup(userToPerformActionWith->getGroups(), groupToAddUserTo); if(!groupWithAddUserRights) { string errMsg = "The user "; errMsg += userToPerformActionWith->getName(); errMsg += " does not belong to any group that is allowed to add an user to the group "; errMsg += groupToAddUserTo->getName(); throw PermissionDeniedException(errMsg); } sibs::SafeSerializer serializer; serializer.add(DATABASE_ADD_PACKET_STRUCTURE_VERSION); u64 timestampCombined = getSyncedTimestampUtc().getCombined(); serializer.add(timestampCombined); serializer.add(DatabaseOperation::ADD_USER); assert(userToAddName.size() <= 255); usize serializedEncryptedDataOffset = serializer.getBuffer().size(); serializer.add((u8)userToAddName.size()); DataView encryptionKey(nodeInfo.getNodeEncryptionKey()->data, ENCRYPTION_KEY_BYTE_SIZE); Encryption encryptedUserName(DataView((void*)userToAddName.data(), userToAddName.size()), DataView(), encryptionKey); serializer.add((u8*)encryptedUserName.getNonce().data, ENCRYPTION_NONCE_BYTE_SIZE); assert(encryptedUserName.getCipherText().size == ENCRYPTION_CHECKSUM_BYTE_SIZE + userToAddName.size()); serializer.add((u8*)encryptedUserName.getCipherText().data, ENCRYPTION_CHECKSUM_BYTE_SIZE + userToAddName.size()); usize serializedEncryptedDataSize = serializer.getBuffer().size() - serializedEncryptedDataOffset; serializer.add((u8*)userToAddPublicKey.getData(), 
        serializer.add((u8*)userToAddPublicKey.getData(), PUBLIC_KEY_NUM_BYTES);
        serializer.add((uint8_t*)groupToAddUserTo->getId().data, groupToAddUserTo->getId().size);

        DataView requestData { serializer.getBuffer().data(), serializer.getBuffer().size() };
        string signedRequestData = userToPerformActionWith->getPrivateKey().sign(requestData);
        DataView stagedAddObject = combine(userToPerformActionWith->getPublicKey(), signedRequestData);
        Hash requestDataHash(stagedAddObject.data, stagedAddObject.size);
        DataView encryptedDataView(nullptr, 0);

        auto userToAdd = RemoteUser::create(userToAddPublicKey, userToAddName, groupToAddUserTo);
        databaseStorage.addUser(*nodeInfo.getRequestHash(), userToAdd);
        databaseStorage.appendStorage(*nodeInfo.getRequestHash(), requestDataHash, DatabaseOperation::ADD_USER, userToPerformActionWith, timestampCombined, (u8*)stagedAddObject.data, stagedAddObject.size, encryptedDataView);

        stagedAddObjects.emplace_back(make_unique(stagedAddObject, nodeInfo.getRequestHash()));
    }

    void Database::commit()
    {
        // TODO: Combine staged objects into one object for efficiency.
        try
        {
            Log::debug("Num objects to create: %zu", stagedCreateObjects.size());
            for(const auto &stagedObject : stagedCreateObjects)
            {
                commitStagedCreateObject(stagedObject);
            }

            Log::debug("Num objects to add: %zu", stagedAddObjects.size());
            for(const auto &stagedObject : stagedAddObjects)
            {
                commitStagedAddObject(stagedObject);
            }
        }
        catch (exception &e)
        {
            // TODO: Add rollback
            Log::error("Failed to commit, reason: %s", e.what());
        }

        for(const auto &stagedObject : stagedCreateObjects)
        {
            delete[] (char*)stagedObject->data.data;
        }
        stagedCreateObjects.clear();

        for(const auto &stagedObject : stagedAddObjects)
        {
            delete[] (char*)stagedObject->data.data;
        }
        stagedAddObjects.clear();

        // TODO: Add node.listen here to get notified when remote peers got the commit, then we can say we can return
    }

    void Database::commitStagedCreateObject(const unique_ptr &stagedObject)
    {
        DhtKey dhtKey(*stagedObject->requestKey);
        Value createDataValue((u8*)stagedObject->data.data, stagedObject->data.size);
        node.put(dhtKey.getNewDataListenerKey(), move(createDataValue), [](bool ok)
        {
            // TODO: Handle failure to put data
            if(!ok)
                Log::warn("Failed to put: %s, what to do?", "commitStagedCreateObject");
        }/* TODO: How to make this work?, time_point(), false*/);
    }

    void Database::commitStagedAddObject(const unique_ptr &stagedObject)
    {
        DhtKey dhtKey(*stagedObject->requestKey);
        Value createDataValue((u8*)stagedObject->data.data, stagedObject->data.size);
        node.put(dhtKey.getNewDataListenerKey(), move(createDataValue), [](bool ok)
        {
            // TODO: Handle failure to put data
            if(!ok)
                Log::warn("Failed to put: %s, what to do?", "commitStagedAddObject");
        }/* TODO: How to make this work?, time_point(), false*/);
    }

    ntp::NtpTimestamp Database::getSyncedTimestampUtc() const
    {
        while(!timestampSynced)
        {
            this_thread::sleep_for(10ms);
        }

        struct timeval currentLocalTime;
        gettimeofday(&currentLocalTime, NULL);
        ntp::NtpTimestamp timestamp;
        timestamp.seconds = currentLocalTime.tv_sec - timeOffset;
        timestamp.fractions = currentLocalTime.tv_usec - timeOffsetFraction;
        return timestamp;
    }

    void Database::deserializeCreateRequest(const shared_ptr<Value> &value, const Hash &hash, const shared_ptr<OwnedMemory> encryptionKey)
    {
        sibs::SafeDeserializer deserializer(value->data.data(), value->data.size());
        u16 packetStructureVersion = deserializer.extract<u16>();
        if(packetStructureVersion != DATABASE_CREATE_PACKET_STRUCTURE_VERSION)
        {
            string errMsg = "Received 'create' request with packet structure version ";
            errMsg += to_string(packetStructureVersion);
            errMsg += ", but our packet structure version is ";
structure version is "; errMsg += to_string(DATABASE_CREATE_PACKET_STRUCTURE_VERSION); throw sibs::DeserializeException(errMsg); } u64 creationDate = deserializer.extract(); if(creationDate > getSyncedTimestampUtc().getCombined()) throw sibs::DeserializeException("Packet is from the future"); char creatorPublicKeyRaw[PUBLIC_KEY_NUM_BYTES]; deserializer.extract((u8*)creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES); Signature::PublicKey userPublicKey(creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES); uint8_t adminGroupId[GROUP_ID_LENGTH]; deserializer.extract(adminGroupId, GROUP_ID_LENGTH); if(deserializer.getSize() < ENCRYPTION_NONCE_BYTE_SIZE) throw sibs::DeserializeException("Unsigned encrypted body is too small (unable to extract nonce)"); auto adminGroup = new Group("administrator", adminGroupId, ADMIN_PERMISSION); // TODO: Username is encrypted, we dont know it... unless we have encryption key, in which case we should modify the user name and set it auto creatorUser = RemoteUser::create(userPublicKey, "ENCRYPTED USER NAME", adminGroup); databaseStorage.createStorage(hash, adminGroup, creationDate, value->data.data(), value->data.size(), value->data.size() - deserializer.getSize()); } void Database::deserializeAddRequest(const shared_ptr &value, const Hash &requestDataHash, const std::shared_ptr &nodeHash, const shared_ptr encryptionKey) { sibs::SafeDeserializer deserializer(value->data.data(), value->data.size()); char creatorPublicKeyRaw[PUBLIC_KEY_NUM_BYTES]; deserializer.extract((u8*)creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES); Signature::PublicKey creatorPublicKey(creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES); DataView signedData((void*)deserializer.getBuffer(), deserializer.getSize()); string unsignedData = creatorPublicKey.unsign(signedData); sibs::SafeDeserializer deserializerUnsigned((u8*)unsignedData.data(), unsignedData.size()); u16 packetStructureVersion = deserializerUnsigned.extract(); if(packetStructureVersion != DATABASE_CREATE_PACKET_STRUCTURE_VERSION) { string errMsg = "Received 'create' request with packet structure version "; errMsg += to_string(packetStructureVersion); errMsg += ", but our packet structure version is "; errMsg += to_string(DATABASE_CREATE_PACKET_STRUCTURE_VERSION); throw sibs::DeserializeException(errMsg); } u64 creationDate = deserializerUnsigned.extract(); if(creationDate > getSyncedTimestampUtc().getCombined()) throw sibs::DeserializeException("Packet is from the future"); DatabaseOperation operation = deserializerUnsigned.extract(); #if 0 const Hash *node = databaseStorage.getNodeByUserPublicKey(creatorPublicKey); if(!node) { // The user (public key) could belong to a node but we might not have retrieved the node info yet since data may // not be retrieved in order. 
            // Data in quarantine is processed when 'create' packet is received or removed after 60 seconds
            databaseStorage.addToQuarantine(requestDataHash, creatorPublicKey, creationDate, value->data.data(), value->data.size());
            throw RequestQuarantineException();
        }
#endif

        auto creatorUser = databaseStorage.getUserByPublicKey(*nodeHash, creatorPublicKey);
        if(!creatorUser)
        {
            // TODO: Add to quarantine
            string errMsg = "User with public key ";
            errMsg += creatorPublicKey.toString();
            errMsg += " does not exist in node ";
            errMsg += nodeHash->toString();
            throw sibs::DeserializeException(errMsg);
        }

        DataView encryptedDataView((void*)deserializerUnsigned.getBuffer(), deserializerUnsigned.getSize());
        if(operation == DatabaseOperation::ADD_DATA)
        {
            if(deserializerUnsigned.getSize() < ENCRYPTION_NONCE_BYTE_SIZE)
                throw sibs::DeserializeException("Unsigned encrypted body is too small (unable to extract nonce)");

            if(!creatorUser->isAllowedToPerformAction(PermissionType::ADD_DATA))
            {
                // TODO: User might have permission to perform operation, but we haven't got the packet that adds user to the group with the permission,
                // or we haven't received the packet that modifies group with the permission to perform the operation.
                // This also means that an user can be in a group that has permission to perform the operation and then later be removed from it,
                // and remote peers would accept our request to perform operation if they haven't received the operation that removes the user from the group.
                // How to handle this?
                string errMsg = "User ";
                errMsg += creatorUser->getName();
                errMsg += " is not allowed to add data to node ";
                errMsg += nodeHash->toString();
                throw PermissionDeniedException(errMsg);
            }

            // TODO: Verify there isn't already data with same timestamp for this node. Same for quarantine.
            // TODO: We might receive 'add' data packet before 'create'. If that happens, we should put it in quarantine and process it later.
            databaseStorage.appendStorage(*nodeHash, requestDataHash, operation, creatorUser, creationDate, value->data.data(), value->data.size(), encryptedDataView);
        }
        else if(operation == DatabaseOperation::ADD_USER)
        {
            u8 nameLength = deserializerUnsigned.extract<u8>();
            u8 nonce[ENCRYPTION_NONCE_BYTE_SIZE];
            deserializerUnsigned.extract(nonce, ENCRYPTION_NONCE_BYTE_SIZE);
            DataView dataToDecrypt((void*)deserializerUnsigned.getBuffer(), ENCRYPTION_CHECKSUM_BYTE_SIZE + nameLength);

            sibs::SafeDeserializer deserializerSkippedEncryptedData(deserializerUnsigned.getBuffer() + ENCRYPTION_CHECKSUM_BYTE_SIZE + nameLength, PUBLIC_KEY_NUM_BYTES + GROUP_ID_LENGTH);
            char userToAddPublicKeyRaw[PUBLIC_KEY_NUM_BYTES];
            deserializerSkippedEncryptedData.extract((u8*)userToAddPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
            Signature::PublicKey userToAddPublicKey(userToAddPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
            uint8_t groupId[GROUP_ID_LENGTH];
            deserializerSkippedEncryptedData.extract(groupId, GROUP_ID_LENGTH);

            auto group = databaseStorage.getGroupById(*nodeHash, groupId);
            if(group)
            {
                auto user = RemoteUser::create(userToAddPublicKey, "ENCRYPTED USER NAME", group);
                // TODO: What if we receive packets in wrong order? (maliciously or non-maliciously). You would be able to register a user to a group with given name
                // and further registration would be dropped (even if that is the correct one)
                if(!databaseStorage.addUser(*nodeHash, user))
                    return;

                auto creatorUserGroupWithRights = getGroupWithRightsToAddUserToGroup(creatorUser->getGroups(), group);
                if(!creatorUserGroupWithRights)
                {
                    // TODO: User might have permission to perform operation, but we haven't got the packet that adds user to the group with the permission,
                    // or we haven't received the packet that modifies group with the permission to perform the operation.
                    // This also means that an user can be in a group that has permission to perform the operation and then later be removed from it,
                    // and remote peers would accept our request to perform operation if they haven't received the operation that removes the user from the group.
                    // How to handle this?
                    string errMsg = "User ";
                    errMsg += creatorUser->getName();
                    errMsg += " is not allowed to perform the operation: ";
                    errMsg += to_string((u8)operation);
                    throw PermissionDeniedException(errMsg);
                }

                // TODO: Verify there isn't already data with same timestamp for this node. Same for quarantine.
                // TODO: We might receive 'add' data packet before 'create'. If that happens, we should put it in quarantine and process it later.
                databaseStorage.appendStorage(*nodeHash, requestDataHash, operation, creatorUser, creationDate, value->data.data(), value->data.size(), encryptedDataView);
            }
            else
            {
                throw sibs::DeserializeException("TODO: Add to quarantine? You can receive ADD_USER packet before you receive ADD_GROUP");
            }
        }
        else
        {
            string errMsg = "Got unexpected operation: ";
            errMsg += to_string((u8)operation);
            throw sibs::DeserializeException(errMsg);
        }
    }

    bool Database::listenCreateData(shared_ptr<Value> value, const Hash &hash, const shared_ptr<OwnedMemory> encryptionKey)
    {
        Log::debug("Got create data");
        try
        {
            if(databaseStorage.getStorage(hash))
                throw DatabaseStorageAlreadyExists("Create request hash is equal to hash already in storage (duplicate data?)");
            deserializeCreateRequest(value, hash, encryptionKey);
        }
        catch (exception &e)
        {
            Log::warn("Failed to deserialize 'create' request: %s", e.what());
        }
        return true;
    }

    bool Database::listenAddData(shared_ptr<Value> value, const Hash &requestDataHash, const std::shared_ptr<Hash> nodeHash, const shared_ptr<OwnedMemory> encryptionKey)
    {
        Log::debug("Got add data");
        try
        {
            deserializeAddRequest(value, requestDataHash, nodeHash, encryptionKey);
            //Log::debug("Got add object, timestamp: %zu", addObject.timestamp);
        }
        catch (RequestQuarantineException &e)
        {
            Log::warn("Request was put in quarantine, will be processed later");
        }
        catch (exception &e)
        {
            Log::warn("Failed to deserialize 'add' request: %s", e.what());
        }
        return true;
    }

    void Database::setOnCreateNodeCallback(function callbackFunc)
    {
        onCreateNodeCallbackFunc = callbackFunc;
    }

    void Database::setOnAddNodeCallback(function callbackFunc)
    {
        onAddNodeCallbackFunc = callbackFunc;
    }

    void Database::setOnAddUserCallback(function callbackFunc)
    {
        onAddUserCallbackFunc = callbackFunc;
    }

    DatabaseStorage& Database::getStorage()
    {
        return databaseStorage;
    }
}
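
// Minimal usage sketch (illustrative only; the bootstrap address, port and storage path
// below are placeholder values, and error handling is omitted):
//
//     odhtdb::Database db("bootstrap.example.com", 27130, "/tmp/odhtdb-storage");
//     auto createResponse = db.create("alice", "my-node"); // stages a 'create' packet
//     db.commit();                                         // publishes staged packets on the DHT
//
// The returned DatabaseCreateResponse carries the node encryption key and request hash,
// which identify the node when seeding it or adding data to it later.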