Diffstat (limited to 'src')
-rw-r--r--  src/Database.cpp         49
-rw-r--r--  src/DatabaseStorage.cpp  35
-rw-r--r--  src/Signature.cpp         6
3 files changed, 82 insertions, 8 deletions
diff --git a/src/Database.cpp b/src/Database.cpp
index cd87845..e3b9f3d 100644
--- a/src/Database.cpp
+++ b/src/Database.cpp
@@ -92,11 +92,11 @@ namespace odhtdb
stagedCreateObjects.emplace_back(StagedCreateObject(key, primaryAdminGroup, timeMicroseconds));
}
- void Database::add(const Key &key, DataView data)
+ void Database::add(const Key &key, DataView data, LocalUser *creator)
{
// TODO: Append fractions to get real microseconds time
u64 timeMicroseconds = ((u64)getSyncedTimestampUtc().seconds) * 1000000ull;
- stagedAddObjects.emplace_back(StagedAddObject(key, data, timeMicroseconds));
+ stagedAddObjects.emplace_back(StagedAddObject(key, data, timeMicroseconds, creator->getPublicKey()));
}
void Database::commit()
@@ -121,6 +121,13 @@ namespace odhtdb
// TODO: Add node.listen here to get notified when remote peers got the commit, then we can say we can return
}
+ // TODO: If the same key already exists, fail the operation.
+ // Security issue: a malicious remote peer (or routing peer) could listen for this create request and build its own
+ // create request with the same key, stealing ownership of the key.
+ // Possible solution: if odhtdb is, for example, used to build a chat application, the key could be the chat channel id,
+ // created by hashing a locally generated channel id together with ownership information.
+ // Remote peers would then not be able to steal ownership of the key, since the hash of the ownership data has to match the key.
+ // The key (channel id + ownership info) could then be shared with friends, who can use it to join your channel.
void Database::commitStagedCreateObject(const StagedCreateObject &stagedObject)
{
// TODO: Use (ed25519 or poly1305) and curve25519
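Editor's note: the security TODO in the hunk above proposes binding ownership information into the key itself. A minimal sketch of that idea follows; it is not part of this commit, and the helper name, the use of libsodium's crypto_generichash, and the 32-byte output are assumptions made purely for illustration.

#include <sodium.h>
#include <array>
#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical helper: derive a channel key by hashing a locally generated
// channel id together with the owner's public key. A peer that only observes
// the create request cannot claim the same key under a different owner,
// because the key would no longer match the hash of its ownership data.
static std::array<unsigned char, crypto_generichash_BYTES> deriveChannelKey(
    const unsigned char *channelId, std::size_t channelIdSize,
    const unsigned char *ownerPublicKey, std::size_t ownerPublicKeySize)
{
    // Concatenate the generated channel id with the ownership information...
    std::vector<unsigned char> buffer(channelIdSize + ownerPublicKeySize);
    std::memcpy(buffer.data(), channelId, channelIdSize);
    std::memcpy(buffer.data() + channelIdSize, ownerPublicKey, ownerPublicKeySize);

    // ...and hash it; the hash becomes the key under which the channel is created.
    std::array<unsigned char, crypto_generichash_BYTES> key;
    crypto_generichash(key.data(), key.size(), buffer.data(), buffer.size(), nullptr, 0);
    return key;
}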
@@ -150,6 +157,7 @@ namespace odhtdb
}/* TODO: How to make this work?, time_point(), false*/);
// Post data for listeners of this key
+ /*
Value putKeyValue(serializer.getBuffer().data() + OPENDHT_INFOHASH_LEN, serializer.getBuffer().size() - OPENDHT_INFOHASH_LEN);
node.put(stagedObject.key.hashedKey, move(putKeyValue), [](bool ok)
{
@@ -157,6 +165,7 @@ namespace odhtdb
if(!ok)
fprintf(stderr, "Failed to put for listeners: %s, what to do?\n", "commitStagedCreateObject");
});
+ */
}
void Database::commitStagedAddObject(const StagedAddObject &stagedObject)
@@ -165,8 +174,11 @@ namespace odhtdb
// TODO: Implement gas and price (refill when serving content (seeding) or by waiting. This is done to prevent spamming and bandwidth leeching)
sibs::SafeSerializer serializer;
assert(stagedObject.key.hashedKey.size() == OPENDHT_INFOHASH_LEN);
- serializer.add(stagedObject.key.hashedKey.data(), stagedObject.key.hashedKey.size());
+ serializer.add(stagedObject.key.hashedKey.data(), OPENDHT_INFOHASH_LEN);
serializer.add(stagedObject.timestamp);
+ serializer.add((u8*)stagedObject.creatorPublicKey.getData(), PUBLIC_KEY_NUM_BYTES);
+ assert(stagedObject.data.size < 0xFFFF - 120);
+ serializer.add((u16)stagedObject.data.size);
serializer.add((u8*)stagedObject.data.data, stagedObject.data.size);
// TODO: Verify if serializer buffer needs to survive longer than this scope
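Editor's note: for orientation, the serializer calls above imply the following on-wire size for an "add" request. The concrete sizes below (20-byte InfoHash, 32-byte public key) are assumptions about OPENDHT_INFOHASH_LEN and PUBLIC_KEY_NUM_BYTES, not values taken from this commit.

#include <cstddef>
#include <cstdint>

// Assumed sizes, for illustration only; the real values come from the
// project's headers (OPENDHT_INFOHASH_LEN, PUBLIC_KEY_NUM_BYTES).
constexpr std::size_t kInfoHashLen  = 20;
constexpr std::size_t kPublicKeyLen = 32;

// Serialized "add" request, in the order written by commitStagedAddObject:
// hashed key, timestamp (u64 microseconds), creator public key,
// payload size (u16), payload bytes.
constexpr std::size_t addRequestSerializedSize(std::size_t dataSize)
{
    return kInfoHashLen
         + sizeof(std::uint64_t)
         + kPublicKeyLen
         + sizeof(std::uint16_t)
         + dataSize;
}

Under these assumed sizes the fixed fields total 62 bytes, and the 120-byte headroom in the assert above presumably keeps the whole serialized value, fixed fields included, under the 16-bit size limit.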
@@ -176,16 +188,18 @@ namespace odhtdb
// TODO: Handle failure to put data
if(!ok)
fprintf(stderr, "Failed to put for all: %s, what to do?\n", "commitStagedAddObject");
- }, time_point(), false);
+ });
// Post data for listeners of this key
+ /*
Value putKeyValue(serializer.getBuffer().data() + OPENDHT_INFOHASH_LEN, serializer.getBuffer().size() - OPENDHT_INFOHASH_LEN);
node.put(stagedObject.key.hashedKey, move(putKeyValue), [](bool ok)
{
// TODO: Handle failure to put data
if(!ok)
fprintf(stderr, "Failed to put for listeners: %s, what to do?\n", "commitStagedAddObject");
- }, time_point(), false);
+ });
+ */
}
ntp::NtpTimestamp Database::getSyncedTimestampUtc() const
@@ -242,6 +256,17 @@ namespace odhtdb
deserializer.extract(entryKeyRaw, OPENDHT_INFOHASH_LEN);
result.key.hashedKey = InfoHash(entryKeyRaw, OPENDHT_INFOHASH_LEN);
result.timestamp = deserializer.extract<u64>();
+
+ char creatorPublicKeyRaw[PUBLIC_KEY_NUM_BYTES];
+ deserializer.extract((u8*)creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
+ Signature::PublicKey creatorPublicKey(creatorPublicKeyRaw, PUBLIC_KEY_NUM_BYTES);
+
+ u16 dataSize = deserializer.extract<u16>();
+ char *data = (char*)malloc(dataSize);
+ if(!data)
+ throw sibs::DeserializeException("Failed to allocate memory for add request");
+ result.data.data = data;
+ result.data.size = dataSize;
return result;
}
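Editor's note: one thing worth flagging in the hunk above is that the buffer allocated for the payload is returned in result.data, but no bytes appear to be copied into it. Mirroring the extract calls already used for the key and the public key, the presumably intended step would look like this (a sketch, not code from the commit):

// Presumed missing step: copy the payload out of the deserializer into the
// freshly allocated buffer before handing it to the result.
deserializer.extract((u8*)data, dataSize);
result.data.data = data;
result.data.size = dataSize;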
@@ -253,12 +278,17 @@ namespace odhtdb
{
// TODO: Verify createObject timestamp is not in the future
StagedCreateObject createObject = deserializeCreateRequest(value);
- delete createObject.primaryAdminGroup;
+ databaseStorage.createStorage(createObject.key, { createObject.primaryAdminGroup }, createObject.timestamp);
+ //delete createObject.primaryAdminGroup;
}
catch (sibs::DeserializeException &e)
{
fprintf(stderr, "Warning: Failed to deserialize 'create' request: %s\n", e.what());
}
+ catch (DatabaseStorageAlreadyExists &e)
+ {
fprintf(stderr, "Warning: Failed to handle 'create' request: %s\n", e.what());
+ }
return true;
}
@@ -267,12 +297,19 @@ namespace odhtdb
printf("Got add data\n");
try
{
+ // TODO: Verify addObject timestamp is not in the future
StagedAddObject addObject = deserializeAddRequest(value);
+ databaseStorage.appendStorage(addObject.key, addObject.data, addObject.timestamp, addObject.creatorPublicKey);
+ //free(addObject.data.data);
}
catch (sibs::DeserializeException &e)
{
fprintf(stderr, "Warning: Failed to deserialize 'add' request: %s\n", e.what());
}
+ catch (DatabaseStorageNotFound &e)
+ {
fprintf(stderr, "Warning: Failed to handle 'add' request: %s\n", e.what());
+ }
return true;
}
}
diff --git a/src/DatabaseStorage.cpp b/src/DatabaseStorage.cpp
new file mode 100644
index 0000000..2028c63
--- /dev/null
+++ b/src/DatabaseStorage.cpp
@@ -0,0 +1,35 @@
+#include "../include/DatabaseStorage.hpp"
+
+using namespace std;
+
+namespace odhtdb
+{
+ void DatabaseStorage::createStorage(const Key &key, vector<Group*> &&groups, u64 timestamp)
+ {
+ if(storageMap.find(key) != storageMap.end())
+ {
+ string errMsg = "Database storage with key ";
+ errMsg += key.hashedKey.toString();
+ errMsg += " already exists";
+ throw DatabaseStorageAlreadyExists(errMsg);
+ }
+
+ DatabaseStorageObjectList *databaseStorageObjectList = new DatabaseStorageObjectList();
+ databaseStorageObjectList->timestamp = timestamp;
+ databaseStorageObjectList->groups = move(groups);
+ storageMap[key] = databaseStorageObjectList;
+ }
+
+ void DatabaseStorage::appendStorage(const Key &key, DataView &data, u64 timestamp, const Signature::PublicKey &creatorPublicKey)
+ {
+ auto it = storageMap.find(key);
+ if(it == storageMap.end())
+ {
+ string errMsg = "Database storage with key ";
+ errMsg += key.hashedKey.toString();
+ errMsg += " not found";
+ throw DatabaseStorageNotFound(errMsg);
+ }
+ it->second->objects.push_back({data, timestamp, creatorPublicKey});
+ }
+}
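Editor's note: the header this new file includes, DatabaseStorage.hpp, is not part of the diff. Based purely on how the code above uses it, its declarations presumably look roughly like the reconstruction below; every name and container choice is inferred, and the real header may well differ (for example, storageMap could be an unordered_map with a custom hasher).

// Inferred shape of include/DatabaseStorage.hpp -- a reconstruction, not the real header.
// Project types (Key, DataView, Group, Signature::PublicKey, u64) come from other project headers.
#pragma once

#include <map>
#include <stdexcept>
#include <vector>

namespace odhtdb
{
    struct DatabaseStorageObject
    {
        DataView data;
        u64 timestamp;
        Signature::PublicKey creatorPublicKey;
    };

    struct DatabaseStorageObjectList
    {
        u64 timestamp;
        std::vector<Group*> groups;
        std::vector<DatabaseStorageObject> objects;
    };

    class DatabaseStorageAlreadyExists : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    class DatabaseStorageNotFound : public std::runtime_error
    {
    public:
        using std::runtime_error::runtime_error;
    };

    class DatabaseStorage
    {
    public:
        void createStorage(const Key &key, std::vector<Group*> &&groups, u64 timestamp);
        void appendStorage(const Key &key, DataView &data, u64 timestamp, const Signature::PublicKey &creatorPublicKey);

    private:
        std::map<Key, DatabaseStorageObjectList*> storageMap;
    };
}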
diff --git a/src/Signature.cpp b/src/Signature.cpp
index 804047e..946d754 100644
--- a/src/Signature.cpp
+++ b/src/Signature.cpp
@@ -9,7 +9,9 @@ namespace odhtdb
{
namespace Signature
{
- PublicKey::PublicKey(char *_data, size_t size)
+ PublicKey PublicKey::ZERO("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", PUBLIC_KEY_NUM_BYTES);
+
+ PublicKey::PublicKey(const char *_data, size_t size)
{
if(size != PUBLIC_KEY_NUM_BYTES)
{
@@ -41,7 +43,7 @@ namespace odhtdb
return result;
}
- PrivateKey::PrivateKey(char *_data, size_t size)
+ PrivateKey::PrivateKey(const char *_data, size_t size)
{
if(size != PRIVATE_KEY_NUM_BYTES)
{
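Editor's note: the Signature.cpp hunks make the key constructors const-correct and introduce an all-zero PublicKey::ZERO constant, which makes it usable as a "no key" sentinel. A small illustrative check, assuming getData() exposes the raw key bytes as it does elsewhere in this commit (the helper itself is hypothetical):

#include <cstring>
// Assumes ../include/Signature.hpp is included for PublicKey and PUBLIC_KEY_NUM_BYTES.

// Hypothetical helper: treat an all-zero public key as "unset" by comparing
// against the ZERO sentinel added above.
static bool isUnsetPublicKey(const odhtdb::Signature::PublicKey &key)
{
    return std::memcmp(key.getData(), odhtdb::Signature::PublicKey::ZERO.getData(),
                       PUBLIC_KEY_NUM_BYTES) == 0;
}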