diff --git a/.sqlx/query-0ac5ed5fc0b22aeb5e3b72b81b70dc07c95361aaa2aa0051628b566b99d15b2c.json b/.sqlx/query-0ac5ed5fc0b22aeb5e3b72b81b70dc07c95361aaa2aa0051628b566b99d15b2c.json new file mode 100644 index 00000000..5e91f1f7 --- /dev/null +++ b/.sqlx/query-0ac5ed5fc0b22aeb5e3b72b81b70dc07c95361aaa2aa0051628b566b99d15b2c.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_versions\n WHERE shared_profile_id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "0ac5ed5fc0b22aeb5e3b72b81b70dc07c95361aaa2aa0051628b566b99d15b2c" +} diff --git a/.sqlx/query-0c2addb0d7a87fa558821ff8e943bbb751fb2bdc22d1a5368f61cc7827586840.json b/.sqlx/query-0c2addb0d7a87fa558821ff8e943bbb751fb2bdc22d1a5368f61cc7827586840.json deleted file mode 100644 index 667bdcbf..00000000 --- a/.sqlx/query-0c2addb0d7a87fa558821ff8e943bbb751fb2bdc22d1a5368f61cc7827586840.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO files (id, version_id, url, filename, is_primary, size, file_type)\n VALUES ($1, $2, $3, $4, $5, $6, $7)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Varchar", - "Varchar", - "Bool", - "Int4", - "Varchar" - ] - }, - "nullable": [] - }, - "hash": "0c2addb0d7a87fa558821ff8e943bbb751fb2bdc22d1a5368f61cc7827586840" -} diff --git a/.sqlx/query-0e9c50b8a7a9ef1155c1d893979ae5fe6371d397ecee62cb68607f608a772af0.json b/.sqlx/query-0e9c50b8a7a9ef1155c1d893979ae5fe6371d397ecee62cb68607f608a772af0.json new file mode 100644 index 00000000..a548fffb --- /dev/null +++ b/.sqlx/query-0e9c50b8a7a9ef1155c1d893979ae5fe6371d397ecee62cb68607f608a772af0.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO versions_files (version_id, file_id, is_primary)\n VALUES ($1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bool" + ] + }, + "nullable": [] + }, + "hash": "0e9c50b8a7a9ef1155c1d893979ae5fe6371d397ecee62cb68607f608a772af0" +} diff --git a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json b/.sqlx/query-0fa92183a6de18722c9ba740a84c0efaa3d8ad9f3e014b9438ab98ca75356eaf.json similarity index 55% rename from .sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json rename to .sqlx/query-0fa92183a6de18722c9ba740a84c0efaa3d8ad9f3e014b9438ab98ca75356eaf.json index 6d206d58..4230e3fc 100644 --- a/.sqlx/query-0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816.json +++ b/.sqlx/query-0fa92183a6de18722c9ba740a84c0efaa3d8ad9f3e014b9438ab98ca75356eaf.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions v on v.id = f.version_id\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published\n ORDER BY v.date_published\n ", + "query": "\n SELECT f.id, vf.version_id, v.mod_id, f.url, f.filename, vf.is_primary, f.size, f.file_type,\n JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes\n FROM files f\n INNER JOIN versions_files vf on vf.file_id = f.id\n INNER JOIN versions v on v.id = 
vf.version_id\n INNER JOIN mods m on m.id = v.mod_id AND m.status = ANY($3)\n INNER JOIN hashes h on h.file_id = f.id\n WHERE h.algorithm = $1 AND h.hash = ANY($2)\n GROUP BY f.id, v.mod_id, v.date_published, vf.version_id, vf.is_primary\n ORDER BY v.date_published\n ", "describe": { "columns": [ { @@ -52,7 +52,8 @@ "parameters": { "Left": [ "Text", - "ByteaArray" + "ByteaArray", + "TextArray" ] }, "nullable": [ @@ -67,5 +68,5 @@ null ] }, - "hash": "0b79ae3825e05ae07058a0a9d02fb0bd68ce37f3c7cf0356d565c23520988816" + "hash": "0fa92183a6de18722c9ba740a84c0efaa3d8ad9f3e014b9438ab98ca75356eaf" } diff --git a/.sqlx/query-10a892bfd4e988b5491ca631d36fcdd8e36ef1b07f9970ac4a4b8b82db3b42b9.json b/.sqlx/query-10a892bfd4e988b5491ca631d36fcdd8e36ef1b07f9970ac4a4b8b82db3b42b9.json new file mode 100644 index 00000000..9f6c9a16 --- /dev/null +++ b/.sqlx/query-10a892bfd4e988b5491ca631d36fcdd8e36ef1b07f9970ac4a4b8b82db3b42b9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM shared_profiles_links WHERE shared_profile_id = $1 AND id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "10a892bfd4e988b5491ca631d36fcdd8e36ef1b07f9970ac4a4b8b82db3b42b9" +} diff --git a/.sqlx/query-11239992f6177b638bcf48d416cd9f506c076ea43212fe02321d3d7b192eebea.json b/.sqlx/query-11239992f6177b638bcf48d416cd9f506c076ea43212fe02321d3d7b192eebea.json new file mode 100644 index 00000000..51354432 --- /dev/null +++ b/.sqlx/query-11239992f6177b638bcf48d416cd9f506c076ea43212fe02321d3d7b192eebea.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles_versions (shared_profile_id, version_id)\n SELECT * FROM UNNEST($1::bigint[], $2::bigint[])\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array", + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "11239992f6177b638bcf48d416cd9f506c076ea43212fe02321d3d7b192eebea" +} diff --git a/.sqlx/query-16a3482f7dc853615594607413adb0b7bc097d9e99f26843c4c10fa6b969b9d4.json b/.sqlx/query-16a3482f7dc853615594607413adb0b7bc097d9e99f26843c4c10fa6b969b9d4.json new file mode 100644 index 00000000..364b7876 --- /dev/null +++ b/.sqlx/query-16a3482f7dc853615594607413adb0b7bc097d9e99f26843c4c10fa6b969b9d4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles (\n id, name, owner_id, icon_url, created, updated,\n loader_id, game_id, metadata\n )\n VALUES (\n $1, $2, $3, $4, $5, $6, \n $7, $8, $9\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Varchar", + "Int8", + "Varchar", + "Timestamptz", + "Timestamptz", + "Int4", + "Int4", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "16a3482f7dc853615594607413adb0b7bc097d9e99f26843c4c10fa6b969b9d4" +} diff --git a/.sqlx/query-275c46aac425f17b2bf621be8000142526fffe9d62a7961f0faf35157f9ecff0.json b/.sqlx/query-275c46aac425f17b2bf621be8000142526fffe9d62a7961f0faf35157f9ecff0.json new file mode 100644 index 00000000..e5b80bc7 --- /dev/null +++ b/.sqlx/query-275c46aac425f17b2bf621be8000142526fffe9d62a7961f0faf35157f9ecff0.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT f.id\n FROM hashes h\n INNER JOIN files f ON f.id = h.file_id\n WHERE h.algorithm = $2 AND h.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": 
"275c46aac425f17b2bf621be8000142526fffe9d62a7961f0faf35157f9ecff0" +} diff --git a/.sqlx/query-2d5ad68ad98955b055d5a81472bee01157fb2de23228f89594b19f507e9f732c.json b/.sqlx/query-2d5ad68ad98955b055d5a81472bee01157fb2de23228f89594b19f507e9f732c.json new file mode 100644 index 00000000..9e26f8d0 --- /dev/null +++ b/.sqlx/query-2d5ad68ad98955b055d5a81472bee01157fb2de23228f89594b19f507e9f732c.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT spf.file_id \n FROM shared_profiles_files spf\n INNER JOIN files f ON f.id = spf.file_id\n INNER JOIN hashes h ON h.file_id = f.id\n WHERE (shared_profile_id = $1 AND (h.hash = ANY($2) OR install_path = ANY($3::text[])))\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "ByteaArray", + "TextArray" + ] + }, + "nullable": [ + false + ] + }, + "hash": "2d5ad68ad98955b055d5a81472bee01157fb2de23228f89594b19f507e9f732c" +} diff --git a/.sqlx/query-2eadf8f73266ce5fb700ab4fef5740db29aa86018e16cbd685032f35b2f86fa3.json b/.sqlx/query-2eadf8f73266ce5fb700ab4fef5740db29aa86018e16cbd685032f35b2f86fa3.json new file mode 100644 index 00000000..f15e594e --- /dev/null +++ b/.sqlx/query-2eadf8f73266ce5fb700ab4fef5740db29aa86018e16cbd685032f35b2f86fa3.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id, shared_profile_id, created, expires\n FROM shared_profiles_links spl\n WHERE spl.shared_profile_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "created", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "expires", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "2eadf8f73266ce5fb700ab4fef5740db29aa86018e16cbd685032f35b2f86fa3" +} diff --git a/.sqlx/query-2f02ab522bd717fe13e2584595f71a9d80301cbca4073c0c6106cc32f9478993.json b/.sqlx/query-2f02ab522bd717fe13e2584595f71a9d80301cbca4073c0c6106cc32f9478993.json new file mode 100644 index 00000000..e1f5f6d9 --- /dev/null +++ b/.sqlx/query-2f02ab522bd717fe13e2584595f71a9d80301cbca4073c0c6106cc32f9478993.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_files\n WHERE shared_profile_id = $1\n RETURNING file_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "2f02ab522bd717fe13e2584595f71a9d80301cbca4073c0c6106cc32f9478993" +} diff --git a/.sqlx/query-38eda3e5bd977af134c73e1268fb2e959501eec5cfd5c0c248f212fee9da63f5.json b/.sqlx/query-38eda3e5bd977af134c73e1268fb2e959501eec5cfd5c0c248f212fee9da63f5.json new file mode 100644 index 00000000..c0d8964a --- /dev/null +++ b/.sqlx/query-38eda3e5bd977af134c73e1268fb2e959501eec5cfd5c0c248f212fee9da63f5.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_users\n WHERE shared_profile_id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "38eda3e5bd977af134c73e1268fb2e959501eec5cfd5c0c248f212fee9da63f5" +} diff --git a/.sqlx/query-3e824afee6d0c7e6741fef0cccbe9089e641eee5bb5d9dac06e0531e9c3853d9.json 
b/.sqlx/query-3e824afee6d0c7e6741fef0cccbe9089e641eee5bb5d9dac06e0531e9c3853d9.json new file mode 100644 index 00000000..3adee636 --- /dev/null +++ b/.sqlx/query-3e824afee6d0c7e6741fef0cccbe9089e641eee5bb5d9dac06e0531e9c3853d9.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM hashes\n WHERE EXISTS(\n SELECT 1 FROM files WHERE\n (files.id = ANY($1) AND hashes.file_id = files.id)\n )\n RETURNING encode(hashes.hash, 'escape') hash\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + null + ] + }, + "hash": "3e824afee6d0c7e6741fef0cccbe9089e641eee5bb5d9dac06e0531e9c3853d9" +} diff --git a/.sqlx/query-40992ad8967d190f4b584e3092ea37ee1ce9988070d50dce3708d5de1079340b.json b/.sqlx/query-40992ad8967d190f4b584e3092ea37ee1ce9988070d50dce3708d5de1079340b.json new file mode 100644 index 00000000..d59b5c49 --- /dev/null +++ b/.sqlx/query-40992ad8967d190f4b584e3092ea37ee1ce9988070d50dce3708d5de1079340b.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM files\n WHERE files.id = ANY($1)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "40992ad8967d190f4b584e3092ea37ee1ce9988070d50dce3708d5de1079340b" +} diff --git a/.sqlx/query-155361716f9d697c0d961b7bbad30e70698a8e5c9ceaa03b2091e058b58fb938.json b/.sqlx/query-417950f7e332b0d06a2d75693451899877e01abd792fba8bf7c181261fbab49a.json similarity index 52% rename from .sqlx/query-155361716f9d697c0d961b7bbad30e70698a8e5c9ceaa03b2091e058b58fb938.json rename to .sqlx/query-417950f7e332b0d06a2d75693451899877e01abd792fba8bf7c181261fbab49a.json index 46917471..fa92d1aa 100644 --- a/.sqlx/query-155361716f9d697c0d961b7bbad30e70698a8e5c9ceaa03b2091e058b58fb938.json +++ b/.sqlx/query-417950f7e332b0d06a2d75693451899877e01abd792fba8bf7c181261fbab49a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT v.id id, v.mod_id mod_id FROM files f\n INNER JOIN versions v ON v.id = f.version_id\n WHERE f.url = $1\n ", + "query": "\n SELECT v.id id, v.mod_id mod_id \n FROM files f\n INNER JOIN versions_files vf ON vf.file_id = f.id\n INNER JOIN versions v ON v.id = vf.version_id\n WHERE f.url = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ false ] }, - "hash": "155361716f9d697c0d961b7bbad30e70698a8e5c9ceaa03b2091e058b58fb938" + "hash": "417950f7e332b0d06a2d75693451899877e01abd792fba8bf7c181261fbab49a" } diff --git a/.sqlx/query-44e08e05cde8d5b27e6c45482bf349d6ff14d72a43587e7cd984a3c0dd9d0c3f.json b/.sqlx/query-44e08e05cde8d5b27e6c45482bf349d6ff14d72a43587e7cd984a3c0dd9d0c3f.json new file mode 100644 index 00000000..b85ca9f3 --- /dev/null +++ b/.sqlx/query-44e08e05cde8d5b27e6c45482bf349d6ff14d72a43587e7cd984a3c0dd9d0c3f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_files\n WHERE file_id = ANY($1::bigint[]) AND shared_profile_id = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "44e08e05cde8d5b27e6c45482bf349d6ff14d72a43587e7cd984a3c0dd9d0c3f" +} diff --git a/.sqlx/query-4d784747f424af21136ea82dcf54ee5712e69ad609a030f207918c2e08b34c54.json b/.sqlx/query-4d784747f424af21136ea82dcf54ee5712e69ad609a030f207918c2e08b34c54.json new file mode 100644 index 00000000..b5531d03 --- /dev/null +++ b/.sqlx/query-4d784747f424af21136ea82dcf54ee5712e69ad609a030f207918c2e08b34c54.json @@ -0,0 
+1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT f.id\n FROM files f\n LEFT JOIN versions_files vf ON vf.file_id = f.id\n LEFT JOIN shared_profiles_files spf ON spf.file_id = f.id\n WHERE f.id = ANY($1) AND (vf.version_id IS NOT NULL OR spf.shared_profile_id IS NOT NULL)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + true + ] + }, + "hash": "4d784747f424af21136ea82dcf54ee5712e69ad609a030f207918c2e08b34c54" +} diff --git a/.sqlx/query-4e4a618fb1e8281777e5083a20d2a75a7e1706ee408d20ae1819098cddbad3aa.json b/.sqlx/query-4e4a618fb1e8281777e5083a20d2a75a7e1706ee408d20ae1819098cddbad3aa.json new file mode 100644 index 00000000..71b8d612 --- /dev/null +++ b/.sqlx/query-4e4a618fb1e8281777e5083a20d2a75a7e1706ee408d20ae1819098cddbad3aa.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles_links (\n id, shared_profile_id, created, expires\n )\n VALUES (\n $1, $2, $3, $4\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Timestamptz", + "Timestamptz" + ] + }, + "nullable": [] + }, + "hash": "4e4a618fb1e8281777e5083a20d2a75a7e1706ee408d20ae1819098cddbad3aa" +} diff --git a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json b/.sqlx/query-50e1f713ee4a1d424a92c950fb3d0370711b1e698a900a84cba944284c244b5e.json similarity index 71% rename from .sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json rename to .sqlx/query-50e1f713ee4a1d424a92c950fb3d0370711b1e698a900a84cba944284c244b5e.json index 20c4ed62..32e3783e 100644 --- a/.sqlx/query-e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823.json +++ b/.sqlx/query-50e1f713ee4a1d424a92c950fb3d0370711b1e698a900a84cba944284c244b5e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type\n FROM files f\n WHERE f.version_id = ANY($1)\n ", + "query": "\n SELECT DISTINCT vf.version_id, f.id, f.url, f.filename, vf.is_primary, f.size, f.file_type\n FROM files f\n INNER JOIN versions_files vf ON vf.file_id = f.id\n WHERE vf.version_id = ANY($1)\n ", "describe": { "columns": [ { @@ -54,5 +54,5 @@ true ] }, - "hash": "e72736bb7fca4df41cf34186b1edf04d6b4d496971aaf87ed1a88e7d64eab823" + "hash": "50e1f713ee4a1d424a92c950fb3d0370711b1e698a900a84cba944284c244b5e" } diff --git a/.sqlx/query-56fa5de99008f7c2941e69451c1e842d092b0604585567eb48cc40e7d44f4a7f.json b/.sqlx/query-56fa5de99008f7c2941e69451c1e842d092b0604585567eb48cc40e7d44f4a7f.json new file mode 100644 index 00000000..9eac2a63 --- /dev/null +++ b/.sqlx/query-56fa5de99008f7c2941e69451c1e842d092b0604585567eb48cc40e7d44f4a7f.json @@ -0,0 +1,24 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT m.slug FROM hashes h\n INNER JOIN files f ON f.id = h.file_id\n INNER JOIN versions_files vf on vf.file_id = f.id\n INNER JOIN versions v ON v.id = vf.version_id\n INNER JOIN mods m ON m.id = v.mod_id AND m.status = ANY($3)\n WHERE h.algorithm = $2 AND h.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "slug", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Text", + "TextArray" + ] + }, + "nullable": [ + true + ] + }, + "hash": "56fa5de99008f7c2941e69451c1e842d092b0604585567eb48cc40e7d44f4a7f" +} diff --git a/.sqlx/query-58989968246eeeec7f53e2ac7a40398632faebf858ba300194dd2cc83844d48b.json 
b/.sqlx/query-58989968246eeeec7f53e2ac7a40398632faebf858ba300194dd2cc83844d48b.json new file mode 100644 index 00000000..6da9b468 --- /dev/null +++ b/.sqlx/query-58989968246eeeec7f53e2ac7a40398632faebf858ba300194dd2cc83844d48b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM shared_profiles_links WHERE id=$1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "58989968246eeeec7f53e2ac7a40398632faebf858ba300194dd2cc83844d48b" +} diff --git a/.sqlx/query-5c4262689205aafdd97a74bee0003f39eef0a34c97f97a939c14fb8fe349f7eb.json b/.sqlx/query-5c4262689205aafdd97a74bee0003f39eef0a34c97f97a939c14fb8fe349f7eb.json deleted file mode 100644 index 4fe0c389..00000000 --- a/.sqlx/query-5c4262689205aafdd97a74bee0003f39eef0a34c97f97a939c14fb8fe349f7eb.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE files\n SET is_primary = TRUE\n WHERE (id = $1)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "5c4262689205aafdd97a74bee0003f39eef0a34c97f97a939c14fb8fe349f7eb" -} diff --git a/.sqlx/query-5ff2116335c8899784787dfccba9d9e470d38cc13114b18a6c02507787502dfe.json b/.sqlx/query-5ff2116335c8899784787dfccba9d9e470d38cc13114b18a6c02507787502dfe.json new file mode 100644 index 00000000..5a45fc7c --- /dev/null +++ b/.sqlx/query-5ff2116335c8899784787dfccba9d9e470d38cc13114b18a6c02507787502dfe.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT shared_profile_id, version_id\n FROM shared_profiles_versions spv\n WHERE spv.shared_profile_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "version_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + true + ] + }, + "hash": "5ff2116335c8899784787dfccba9d9e470d38cc13114b18a6c02507787502dfe" +} diff --git a/.sqlx/query-6638b1abe406e55a6b92ea19055448b20740bf04646701766e1b282b423896a4.json b/.sqlx/query-6638b1abe406e55a6b92ea19055448b20740bf04646701766e1b282b423896a4.json new file mode 100644 index 00000000..8c14437e --- /dev/null +++ b/.sqlx/query-6638b1abe406e55a6b92ea19055448b20740bf04646701766e1b282b423896a4.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id FROM versions\n WHERE mod_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "6638b1abe406e55a6b92ea19055448b20740bf04646701766e1b282b423896a4" +} diff --git a/.sqlx/query-6c8b8a2f11c0b4e7a5973547fe1611a0fa4ef366d5c8a91d9fb9a1360ea04d46.json b/.sqlx/query-6c8b8a2f11c0b4e7a5973547fe1611a0fa4ef366d5c8a91d9fb9a1360ea04d46.json deleted file mode 100644 index 7833fda9..00000000 --- a/.sqlx/query-6c8b8a2f11c0b4e7a5973547fe1611a0fa4ef366d5c8a91d9fb9a1360ea04d46.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT EXISTS(SELECT 1 FROM hashes h\n INNER JOIN files f ON f.id = h.file_id\n INNER JOIN versions v ON v.id = f.version_id\n WHERE h.algorithm = $2 AND h.hash = $1 AND v.mod_id != $3)\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "exists", - "type_info": "Bool" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Text", - 
"Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "6c8b8a2f11c0b4e7a5973547fe1611a0fa4ef366d5c8a91d9fb9a1360ea04d46" -} diff --git a/.sqlx/query-6d883ea05aead20f571a0f63bfd63f1d432717ec7a0fb9ab29e01fcb061b3afc.json b/.sqlx/query-6d883ea05aead20f571a0f63bfd63f1d432717ec7a0fb9ab29e01fcb061b3afc.json deleted file mode 100644 index 55a5015c..00000000 --- a/.sqlx/query-6d883ea05aead20f571a0f63bfd63f1d432717ec7a0fb9ab29e01fcb061b3afc.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE files\n SET is_primary = FALSE\n WHERE (version_id = $1)\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "6d883ea05aead20f571a0f63bfd63f1d432717ec7a0fb9ab29e01fcb061b3afc" -} diff --git a/.sqlx/query-6fcd4595999ef05fb09ef56c639486b2ee68732db8ebb11198a0c97ee59d2555.json b/.sqlx/query-6fcd4595999ef05fb09ef56c639486b2ee68732db8ebb11198a0c97ee59d2555.json new file mode 100644 index 00000000..6d36b3d5 --- /dev/null +++ b/.sqlx/query-6fcd4595999ef05fb09ef56c639486b2ee68732db8ebb11198a0c97ee59d2555.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO shared_profiles_versions (shared_profile_id, version_id) VALUES ($1, $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "6fcd4595999ef05fb09ef56c639486b2ee68732db8ebb11198a0c97ee59d2555" +} diff --git a/.sqlx/query-78d0c0ba63ff65686b5e32ed14724d18d178b915145fea6aa479115658978072.json b/.sqlx/query-78d0c0ba63ff65686b5e32ed14724d18d178b915145fea6aa479115658978072.json new file mode 100644 index 00000000..1b45a675 --- /dev/null +++ b/.sqlx/query-78d0c0ba63ff65686b5e32ed14724d18d178b915145fea6aa479115658978072.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE shared_profiles\n SET icon_url = NULL, color = NULL\n WHERE (id = $1)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "78d0c0ba63ff65686b5e32ed14724d18d178b915145fea6aa479115658978072" +} diff --git a/.sqlx/query-7e40a01e8cbc2740cf8a840443e77837e78eddbf546756569b16e8dec8f16354.json b/.sqlx/query-7e40a01e8cbc2740cf8a840443e77837e78eddbf546756569b16e8dec8f16354.json new file mode 100644 index 00000000..d9d8feb7 --- /dev/null +++ b/.sqlx/query-7e40a01e8cbc2740cf8a840443e77837e78eddbf546756569b16e8dec8f16354.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id, shared_profile_id, created, expires\n FROM shared_profiles_links spl\n WHERE spl.id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "created", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "expires", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "7e40a01e8cbc2740cf8a840443e77837e78eddbf546756569b16e8dec8f16354" +} diff --git a/.sqlx/query-8742663fff60d3c2e03844f9330b812287b09b71546c2b7864fbacc3c91e5dfe.json b/.sqlx/query-8742663fff60d3c2e03844f9330b812287b09b71546c2b7864fbacc3c91e5dfe.json new file mode 100644 index 00000000..90435003 --- /dev/null +++ b/.sqlx/query-8742663fff60d3c2e03844f9330b812287b09b71546c2b7864fbacc3c91e5dfe.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM shared_profiles_versions WHERE shared_profile_id = $1", 
+ "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "8742663fff60d3c2e03844f9330b812287b09b71546c2b7864fbacc3c91e5dfe" +} diff --git a/.sqlx/query-a1ba3b5cc50b1eb24f5529e06be1439f4a313c4ea8845c2733db752e53f5ae1c.json b/.sqlx/query-8a12a47c8e17fef339affb7b3c43870a2801090a0568de1e48bbc69cfaf019b6.json similarity index 55% rename from .sqlx/query-a1ba3b5cc50b1eb24f5529e06be1439f4a313c4ea8845c2733db752e53f5ae1c.json rename to .sqlx/query-8a12a47c8e17fef339affb7b3c43870a2801090a0568de1e48bbc69cfaf019b6.json index 3d018fc4..9d53535e 100644 --- a/.sqlx/query-a1ba3b5cc50b1eb24f5529e06be1439f4a313c4ea8845c2733db752e53f5ae1c.json +++ b/.sqlx/query-8a12a47c8e17fef339affb7b3c43870a2801090a0568de1e48bbc69cfaf019b6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT COUNT(f.id) FROM files f\n INNER JOIN versions v on f.version_id = v.id AND v.status = ANY($2)\n INNER JOIN mods m on v.mod_id = m.id AND m.status = ANY($1)\n ", + "query": "\n SELECT COUNT(f.id) FROM files f\n INNER JOIN versions_files vf ON vf.file_id = f.id\n INNER JOIN versions v ON v.id = vf.version_id AND v.status = ANY($2)\n INNER JOIN mods m on v.mod_id = m.id AND m.status = ANY($1)\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ null ] }, - "hash": "a1ba3b5cc50b1eb24f5529e06be1439f4a313c4ea8845c2733db752e53f5ae1c" + "hash": "8a12a47c8e17fef339affb7b3c43870a2801090a0568de1e48bbc69cfaf019b6" } diff --git a/.sqlx/query-8a37c029e438415db69f4b6f956156bee51b528e122864e87cb50226194a3330.json b/.sqlx/query-8a37c029e438415db69f4b6f956156bee51b528e122864e87cb50226194a3330.json new file mode 100644 index 00000000..ca8f7f14 --- /dev/null +++ b/.sqlx/query-8a37c029e438415db69f4b6f956156bee51b528e122864e87cb50226194a3330.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE shared_profiles SET name = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "8a37c029e438415db69f4b6f956156bee51b528e122864e87cb50226194a3330" +} diff --git a/.sqlx/query-951adc782b5217df22b7881b0b0281434f3fca004424ac6b859fbc2419e964e9.json b/.sqlx/query-951adc782b5217df22b7881b0b0281434f3fca004424ac6b859fbc2419e964e9.json new file mode 100644 index 00000000..fec998dd --- /dev/null +++ b/.sqlx/query-951adc782b5217df22b7881b0b0281434f3fca004424ac6b859fbc2419e964e9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "DELETE FROM shared_profiles_users WHERE shared_profile_id = $1 AND user_id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "951adc782b5217df22b7881b0b0281434f3fca004424ac6b859fbc2419e964e9" +} diff --git a/.sqlx/query-cfcc6970c0b469c4afd37bedfd386def7980f6b7006030d4783723861d0e3a38.json b/.sqlx/query-a1b6261d66d6be09c275029047e54a5bac87d0018af1a68184dbc3c048ee6c8b.json similarity index 70% rename from .sqlx/query-cfcc6970c0b469c4afd37bedfd386def7980f6b7006030d4783723861d0e3a38.json rename to .sqlx/query-a1b6261d66d6be09c275029047e54a5bac87d0018af1a68184dbc3c048ee6c8b.json index 64c54a6e..2ec001f9 100644 --- a/.sqlx/query-cfcc6970c0b469c4afd37bedfd386def7980f6b7006030d4783723861d0e3a38.json +++ b/.sqlx/query-a1b6261d66d6be09c275029047e54a5bac87d0018af1a68184dbc3c048ee6c8b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT v.id version_id, v.mod_id project_id, h.hash hash FROM hashes h\n INNER JOIN files f on h.file_id = f.id\n INNER JOIN versions v on f.version_id 
= v.id\n WHERE h.algorithm = 'sha1' AND h.hash = ANY($1)\n ", + "query": "\n SELECT v.id version_id, v.mod_id project_id, h.hash hash FROM hashes h\n INNER JOIN files f on h.file_id = f.id\n INNER JOIN versions_files vf on vf.file_id = f.id\n INNER JOIN versions v on v.id = vf.version_id\n WHERE h.algorithm = 'sha1' AND h.hash = ANY($1)\n ", "describe": { "columns": [ { @@ -30,5 +30,5 @@ false ] }, - "hash": "cfcc6970c0b469c4afd37bedfd386def7980f6b7006030d4783723861d0e3a38" + "hash": "a1b6261d66d6be09c275029047e54a5bac87d0018af1a68184dbc3c048ee6c8b" } diff --git a/.sqlx/query-a1e58d8dbffff01cb4a25274fb4c98068a871f69f102131030a7ab80009bc8a5.json b/.sqlx/query-a1e58d8dbffff01cb4a25274fb4c98068a871f69f102131030a7ab80009bc8a5.json new file mode 100644 index 00000000..4816ca9c --- /dev/null +++ b/.sqlx/query-a1e58d8dbffff01cb4a25274fb4c98068a871f69f102131030a7ab80009bc8a5.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE shared_profiles\n SET updated = NOW()\n WHERE id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "a1e58d8dbffff01cb4a25274fb4c98068a871f69f102131030a7ab80009bc8a5" +} diff --git a/.sqlx/query-a6f13c45001aed03871bcd50a76b7cc06e93a598b788dfaf1f05d735132fea27.json b/.sqlx/query-a6f13c45001aed03871bcd50a76b7cc06e93a598b788dfaf1f05d735132fea27.json new file mode 100644 index 00000000..f627750e --- /dev/null +++ b/.sqlx/query-a6f13c45001aed03871bcd50a76b7cc06e93a598b788dfaf1f05d735132fea27.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT encode(hash, 'escape') hash FROM hashes\n WHERE hash = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "hash", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "ByteaArray" + ] + }, + "nullable": [ + null + ] + }, + "hash": "a6f13c45001aed03871bcd50a76b7cc06e93a598b788dfaf1f05d735132fea27" +} diff --git a/.sqlx/query-ab7b439a8364871fcdb552736bd53a44c06006a6618880e191e8110cd0fc16ba.json b/.sqlx/query-ab7b439a8364871fcdb552736bd53a44c06006a6618880e191e8110cd0fc16ba.json new file mode 100644 index 00000000..2d54f1f1 --- /dev/null +++ b/.sqlx/query-ab7b439a8364871fcdb552736bd53a44c06006a6618880e191e8110cd0fc16ba.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles_files (shared_profile_id, file_id, install_path)\n VALUES ($1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "ab7b439a8364871fcdb552736bd53a44c06006a6618880e191e8110cd0fc16ba" +} diff --git a/.sqlx/query-ae06d55081b25a05297d8468dc473f498564809f66b08e3ca63b4d4db32ebfa3.json b/.sqlx/query-ae06d55081b25a05297d8468dc473f498564809f66b08e3ca63b4d4db32ebfa3.json new file mode 100644 index 00000000..65b13328 --- /dev/null +++ b/.sqlx/query-ae06d55081b25a05297d8468dc473f498564809f66b08e3ca63b4d4db32ebfa3.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE versions_files\n SET is_primary = TRUE\n WHERE (file_id = $1 AND version_id = $2)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ae06d55081b25a05297d8468dc473f498564809f66b08e3ca63b4d4db32ebfa3" +} diff --git a/.sqlx/query-aeb70d4d015d79e73f38c782cbc787ebecfe6402a80691ee7e66e0ddb88d8628.json b/.sqlx/query-aeb70d4d015d79e73f38c782cbc787ebecfe6402a80691ee7e66e0ddb88d8628.json new file mode 100644 index 00000000..43d0c901 --- /dev/null +++ 
b/.sqlx/query-aeb70d4d015d79e73f38c782cbc787ebecfe6402a80691ee7e66e0ddb88d8628.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO files (id, url, filename, size, file_type)\n VALUES ($1, $2, $3, $4, $5)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Varchar", + "Varchar", + "Int4", + "Varchar" + ] + }, + "nullable": [] + }, + "hash": "aeb70d4d015d79e73f38c782cbc787ebecfe6402a80691ee7e66e0ddb88d8628" +} diff --git a/.sqlx/query-b047f915672649b3007d362bd50532b5e2e1e05ff276c6441e2e2b3d627df201.json b/.sqlx/query-b047f915672649b3007d362bd50532b5e2e1e05ff276c6441e2e2b3d627df201.json new file mode 100644 index 00000000..5cafc4fa --- /dev/null +++ b/.sqlx/query-b047f915672649b3007d362bd50532b5e2e1e05ff276c6441e2e2b3d627df201.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE shared_profiles\n SET updated = NOW()\n WHERE id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b047f915672649b3007d362bd50532b5e2e1e05ff276c6441e2e2b3d627df201" +} diff --git a/.sqlx/query-b0a13889f62d3056ad94b3893f709f2f694a20ea984655debb1bda2ec7958eaa.json b/.sqlx/query-b0a13889f62d3056ad94b3893f709f2f694a20ea984655debb1bda2ec7958eaa.json new file mode 100644 index 00000000..b83e371f --- /dev/null +++ b/.sqlx/query-b0a13889f62d3056ad94b3893f709f2f694a20ea984655debb1bda2ec7958eaa.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT version_id, file_id\n FROM versions_files\n WHERE version_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "b0a13889f62d3056ad94b3893f709f2f694a20ea984655debb1bda2ec7958eaa" +} diff --git a/.sqlx/query-b581109d66692cfa90a929dc11a06b072a6542bee1cd86a9b3df63ea01ff83e6.json b/.sqlx/query-b581109d66692cfa90a929dc11a06b072a6542bee1cd86a9b3df63ea01ff83e6.json new file mode 100644 index 00000000..7e5c86c1 --- /dev/null +++ b/.sqlx/query-b581109d66692cfa90a929dc11a06b072a6542bee1cd86a9b3df63ea01ff83e6.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_links\n WHERE shared_profile_id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b581109d66692cfa90a929dc11a06b072a6542bee1cd86a9b3df63ea01ff83e6" +} diff --git a/.sqlx/query-b903ac4e686ef85ba28d698c668da07860e7f276b261d8f2cebb74e73b094970.json b/.sqlx/query-b903ac4e686ef85ba28d698c668da07860e7f276b261d8f2cebb74e73b094970.json deleted file mode 100644 index 8bb97239..00000000 --- a/.sqlx/query-b903ac4e686ef85ba28d698c668da07860e7f276b261d8f2cebb74e73b094970.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM hashes\n WHERE EXISTS(\n SELECT 1 FROM files WHERE\n (files.version_id = $1) AND\n (hashes.file_id = files.id)\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "b903ac4e686ef85ba28d698c668da07860e7f276b261d8f2cebb74e73b094970" -} diff --git a/.sqlx/query-c237dd43b7418185b85c96988b35934c62456e82608166677830a7a54065b7af.json b/.sqlx/query-c237dd43b7418185b85c96988b35934c62456e82608166677830a7a54065b7af.json new file mode 100644 index 00000000..53c0eac6 --- /dev/null +++ 
b/.sqlx/query-c237dd43b7418185b85c96988b35934c62456e82608166677830a7a54065b7af.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE shared_profiles\n SET icon_url = $1, color = $2\n WHERE (id = $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Varchar", + "Int4", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c237dd43b7418185b85c96988b35934c62456e82608166677830a7a54065b7af" +} diff --git a/.sqlx/query-c999ad4fcc54919a7b83bfad7c4624d1c670a82502373ef9b3b08c584605d1ca.json b/.sqlx/query-c999ad4fcc54919a7b83bfad7c4624d1c670a82502373ef9b3b08c584605d1ca.json new file mode 100644 index 00000000..8b5fbccb --- /dev/null +++ b/.sqlx/query-c999ad4fcc54919a7b83bfad7c4624d1c670a82502373ef9b3b08c584605d1ca.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE shared_profiles\n SET updated = NOW()\n WHERE id = ANY($1::bigint[])\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [] + }, + "hash": "c999ad4fcc54919a7b83bfad7c4624d1c670a82502373ef9b3b08c584605d1ca" +} diff --git a/.sqlx/query-ca0dcd1c64488eda289f7344d2bb2b874e1ccbe696d1ad1d918adcf571cb05cd.json b/.sqlx/query-ca0dcd1c64488eda289f7344d2bb2b874e1ccbe696d1ad1d918adcf571cb05cd.json new file mode 100644 index 00000000..73b2d7ed --- /dev/null +++ b/.sqlx/query-ca0dcd1c64488eda289f7344d2bb2b874e1ccbe696d1ad1d918adcf571cb05cd.json @@ -0,0 +1,58 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT DISTINCT shared_profile_id, f.id, f.url, f.filename, spf.install_path, f.size, f.file_type\n FROM files f\n INNER JOIN shared_profiles_files spf ON spf.file_id = f.id\n WHERE spf.shared_profile_id = ANY($1)\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "url", + "type_info": "Varchar" + }, + { + "ordinal": 3, + "name": "filename", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "install_path", + "type_info": "Varchar" + }, + { + "ordinal": 5, + "name": "size", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "file_type", + "type_info": "Varchar" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + true + ] + }, + "hash": "ca0dcd1c64488eda289f7344d2bb2b874e1ccbe696d1ad1d918adcf571cb05cd" +} diff --git a/.sqlx/query-cb151564dec209f0ef5cf95f3156e4082557ad5d627fba569c7f6aeca235d9ba.json b/.sqlx/query-cb151564dec209f0ef5cf95f3156e4082557ad5d627fba569c7f6aeca235d9ba.json new file mode 100644 index 00000000..a3a74558 --- /dev/null +++ b/.sqlx/query-cb151564dec209f0ef5cf95f3156e4082557ad5d627fba569c7f6aeca235d9ba.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM versions_files\n WHERE file_id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "cb151564dec209f0ef5cf95f3156e4082557ad5d627fba569c7f6aeca235d9ba" +} diff --git a/.sqlx/query-cb57ae673f1a7e50cc319efddb9bdc82e2251596bcf85aea52e8def343e423b8.json b/.sqlx/query-cb57ae673f1a7e50cc319efddb9bdc82e2251596bcf85aea52e8def343e423b8.json deleted file mode 100644 index 2a441288..00000000 --- a/.sqlx/query-cb57ae673f1a7e50cc319efddb9bdc82e2251596bcf85aea52e8def343e423b8.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO hashes (file_id, algorithm, hash)\n VALUES ($1, $2, $3)\n ", - "describe": { - 
"columns": [], - "parameters": { - "Left": [ - "Int8", - "Varchar", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "cb57ae673f1a7e50cc319efddb9bdc82e2251596bcf85aea52e8def343e423b8" -} diff --git a/.sqlx/query-ccfc8969a13b6c7f7939560ea299ebdc6e7cc996c3a03b2cbacc48b3ea09ef35.json b/.sqlx/query-ccfc8969a13b6c7f7939560ea299ebdc6e7cc996c3a03b2cbacc48b3ea09ef35.json new file mode 100644 index 00000000..178f36d3 --- /dev/null +++ b/.sqlx/query-ccfc8969a13b6c7f7939560ea299ebdc6e7cc996c3a03b2cbacc48b3ea09ef35.json @@ -0,0 +1,94 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT sp.id, sp.name, sp.owner_id, sp.icon_url, sp.created, sp.updated, sp.loader_id,\n l.loader, g.name as game_name, g.id as game_id, sp.metadata,\n ARRAY_AGG(DISTINCT spu.user_id) filter (WHERE spu.user_id IS NOT NULL) as users,\n ARRAY_AGG(DISTINCT spl.id) filter (WHERE spl.id IS NOT NULL) as links\n FROM shared_profiles sp \n LEFT JOIN shared_profiles_links spl ON spl.shared_profile_id = sp.id\n LEFT JOIN loaders l ON l.id = sp.loader_id\n LEFT JOIN shared_profiles_users spu ON spu.shared_profile_id = sp.id\n INNER JOIN games g ON g.id = sp.game_id\n WHERE sp.id = ANY($1)\n GROUP BY sp.id, l.id, g.id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "name", + "type_info": "Varchar" + }, + { + "ordinal": 2, + "name": "owner_id", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "icon_url", + "type_info": "Varchar" + }, + { + "ordinal": 4, + "name": "created", + "type_info": "Timestamptz" + }, + { + "ordinal": 5, + "name": "updated", + "type_info": "Timestamptz" + }, + { + "ordinal": 6, + "name": "loader_id", + "type_info": "Int4" + }, + { + "ordinal": 7, + "name": "loader", + "type_info": "Varchar" + }, + { + "ordinal": 8, + "name": "game_name", + "type_info": "Varchar" + }, + { + "ordinal": 9, + "name": "game_id", + "type_info": "Int4" + }, + { + "ordinal": 10, + "name": "metadata", + "type_info": "Jsonb" + }, + { + "ordinal": 11, + "name": "users", + "type_info": "Int8Array" + }, + { + "ordinal": 12, + "name": "links", + "type_info": "Int8Array" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false, + false, + true, + false, + false, + false, + false, + false, + false, + false, + null, + null + ] + }, + "hash": "ccfc8969a13b6c7f7939560ea299ebdc6e7cc996c3a03b2cbacc48b3ea09ef35" +} diff --git a/.sqlx/query-cd6ac1cb990bccdde0257d1b501fbc9023eff17d78ef35786a6138c58001ddb2.json b/.sqlx/query-cd6ac1cb990bccdde0257d1b501fbc9023eff17d78ef35786a6138c58001ddb2.json new file mode 100644 index 00000000..7252d462 --- /dev/null +++ b/.sqlx/query-cd6ac1cb990bccdde0257d1b501fbc9023eff17d78ef35786a6138c58001ddb2.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles_versions (\n shared_profile_id, version_id\n )\n VALUES (\n $1, $2\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "cd6ac1cb990bccdde0257d1b501fbc9023eff17d78ef35786a6138c58001ddb2" +} diff --git a/.sqlx/query-cdd7f8f95c308d9474e214d584c03be0466214da1e157f6bc577b76dbef7df86.json b/.sqlx/query-cdd7f8f95c308d9474e214d584c03be0466214da1e157f6bc577b76dbef7df86.json deleted file mode 100644 index 9edda84f..00000000 --- a/.sqlx/query-cdd7f8f95c308d9474e214d584c03be0466214da1e157f6bc577b76dbef7df86.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM hashes\n WHERE file_id = $1\n ", - 
"describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "cdd7f8f95c308d9474e214d584c03be0466214da1e157f6bc577b76dbef7df86" -} diff --git a/.sqlx/query-ce10bf641f6fbc38e5acbd76d3097f1f993d084059f2d6ce61e9bfb2364fa96e.json b/.sqlx/query-ce10bf641f6fbc38e5acbd76d3097f1f993d084059f2d6ce61e9bfb2364fa96e.json new file mode 100644 index 00000000..3326f107 --- /dev/null +++ b/.sqlx/query-ce10bf641f6fbc38e5acbd76d3097f1f993d084059f2d6ce61e9bfb2364fa96e.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE versions_files\n SET is_primary = FALSE\n WHERE (version_id = $1)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "ce10bf641f6fbc38e5acbd76d3097f1f993d084059f2d6ce61e9bfb2364fa96e" +} diff --git a/.sqlx/query-d67e6c185460a17b65c0dc01be0f436b87acc79fc56513f1c5c4c99e9b9cb283.json b/.sqlx/query-d67e6c185460a17b65c0dc01be0f436b87acc79fc56513f1c5c4c99e9b9cb283.json new file mode 100644 index 00000000..8cae6494 --- /dev/null +++ b/.sqlx/query-d67e6c185460a17b65c0dc01be0f436b87acc79fc56513f1c5c4c99e9b9cb283.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO hashes (file_id, algorithm, hash)\n VALUES ($1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Varchar", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "d67e6c185460a17b65c0dc01be0f436b87acc79fc56513f1c5c4c99e9b9cb283" +} diff --git a/.sqlx/query-d77c356d92fc2e99ed8e8072d9dfa744d9edb5f5149f54b9f04338fa7e98e4b1.json b/.sqlx/query-d77c356d92fc2e99ed8e8072d9dfa744d9edb5f5149f54b9f04338fa7e98e4b1.json new file mode 100644 index 00000000..bacf6512 --- /dev/null +++ b/.sqlx/query-d77c356d92fc2e99ed8e8072d9dfa744d9edb5f5149f54b9f04338fa7e98e4b1.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE shared_profiles SET metadata = $1 WHERE id = $2", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "d77c356d92fc2e99ed8e8072d9dfa744d9edb5f5149f54b9f04338fa7e98e4b1" +} diff --git a/.sqlx/query-d790254ed6c8cf094d8e80ec0fe572ea5ef594ef504f5610ffb52a45c70da797.json b/.sqlx/query-d790254ed6c8cf094d8e80ec0fe572ea5ef594ef504f5610ffb52a45c70da797.json new file mode 100644 index 00000000..6ccb6e6d --- /dev/null +++ b/.sqlx/query-d790254ed6c8cf094d8e80ec0fe572ea5ef594ef504f5610ffb52a45c70da797.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO shared_profiles_users (\n shared_profile_id, user_id\n )\n VALUES (\n $1, $2\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "d790254ed6c8cf094d8e80ec0fe572ea5ef594ef504f5610ffb52a45c70da797" +} diff --git a/.sqlx/query-d8b4e7e382c77a05395124d5a6a27cccb687d0e2c31b76d49b03aa364d099d42.json b/.sqlx/query-d8b4e7e382c77a05395124d5a6a27cccb687d0e2c31b76d49b03aa364d099d42.json deleted file mode 100644 index 703fe4a1..00000000 --- a/.sqlx/query-d8b4e7e382c77a05395124d5a6a27cccb687d0e2c31b76d49b03aa364d099d42.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM files\n WHERE files.version_id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "d8b4e7e382c77a05395124d5a6a27cccb687d0e2c31b76d49b03aa364d099d42" -} diff --git a/.sqlx/query-dd8a0e5976094bc3285326dd78f92a502d56763d737d0c9927b8ae4dcbbabf24.json 
b/.sqlx/query-dd8a0e5976094bc3285326dd78f92a502d56763d737d0c9927b8ae4dcbbabf24.json new file mode 100644 index 00000000..db778f7b --- /dev/null +++ b/.sqlx/query-dd8a0e5976094bc3285326dd78f92a502d56763d737d0c9927b8ae4dcbbabf24.json @@ -0,0 +1,29 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT version_id, file_id\n FROM versions_files vf\n LEFT JOIN versions v ON v.id = vf.version_id\n LEFT JOIN mods m ON m.id = v.mod_id\n WHERE m.status = ANY($1) AND file_id = ANY($2::bigint[])\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "version_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "TextArray", + "Int8Array" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "dd8a0e5976094bc3285326dd78f92a502d56763d737d0c9927b8ae4dcbbabf24" +} diff --git a/.sqlx/query-e3cc1fd070b97c4cc36bdb2f33080d4e0d7f3c3d81312d9d28a8c3c8213ad54b.json b/.sqlx/query-e3cc1fd070b97c4cc36bdb2f33080d4e0d7f3c3d81312d9d28a8c3c8213ad54b.json deleted file mode 100644 index 241178a3..00000000 --- a/.sqlx/query-e3cc1fd070b97c4cc36bdb2f33080d4e0d7f3c3d81312d9d28a8c3c8213ad54b.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM files\n WHERE files.id = $1\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [] - }, - "hash": "e3cc1fd070b97c4cc36bdb2f33080d4e0d7f3c3d81312d9d28a8c3c8213ad54b" -} diff --git a/.sqlx/query-e539388f748db3cbda83b27df86668dcf0703d987fc1dae89c1b97badb74f73b.json b/.sqlx/query-e539388f748db3cbda83b27df86668dcf0703d987fc1dae89c1b97badb74f73b.json new file mode 100644 index 00000000..3e28a435 --- /dev/null +++ b/.sqlx/query-e539388f748db3cbda83b27df86668dcf0703d987fc1dae89c1b97badb74f73b.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM versions_files\n WHERE versions_files.version_id = $1\n RETURNING file_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e539388f748db3cbda83b27df86668dcf0703d987fc1dae89c1b97badb74f73b" +} diff --git a/.sqlx/query-eb66aaed2aec0466f9b2b11d5bf295b89dd680b0fc4866f2e58bd211ea6d7efe.json b/.sqlx/query-eb66aaed2aec0466f9b2b11d5bf295b89dd680b0fc4866f2e58bd211ea6d7efe.json new file mode 100644 index 00000000..f6db1437 --- /dev/null +++ b/.sqlx/query-eb66aaed2aec0466f9b2b11d5bf295b89dd680b0fc4866f2e58bd211ea6d7efe.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "SELECT EXISTS(SELECT 1 FROM shared_profiles WHERE id=$1)", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "exists", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "eb66aaed2aec0466f9b2b11d5bf295b89dd680b0fc4866f2e58bd211ea6d7efe" +} diff --git a/.sqlx/query-f26bed47f72b6732c54794d47c05a192b37229dfa7a930b946566fefbf6e1378.json b/.sqlx/query-f26bed47f72b6732c54794d47c05a192b37229dfa7a930b946566fefbf6e1378.json new file mode 100644 index 00000000..8fc203d6 --- /dev/null +++ b/.sqlx/query-f26bed47f72b6732c54794d47c05a192b37229dfa7a930b946566fefbf6e1378.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT id, shared_profile_id, created, expires\n FROM shared_profiles_links spl\n WHERE spl.shared_profile_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, 
+ "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "created", + "type_info": "Timestamptz" + }, + { + "ordinal": 3, + "name": "expires", + "type_info": "Timestamptz" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + false, + false, + false + ] + }, + "hash": "f26bed47f72b6732c54794d47c05a192b37229dfa7a930b946566fefbf6e1378" +} diff --git a/.sqlx/query-f5e2e1cb44e42aca9ea0edb8b8f39e33614c2b53ff3b4c1124d0a2e9da53f9ea.json b/.sqlx/query-f5e2e1cb44e42aca9ea0edb8b8f39e33614c2b53ff3b4c1124d0a2e9da53f9ea.json new file mode 100644 index 00000000..8a8a29a7 --- /dev/null +++ b/.sqlx/query-f5e2e1cb44e42aca9ea0edb8b8f39e33614c2b53ff3b4c1124d0a2e9da53f9ea.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT sp.id\n FROM shared_profiles sp \n LEFT JOIN shared_profiles_users spu ON spu.shared_profile_id = sp.id\n WHERE spu.user_id = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "f5e2e1cb44e42aca9ea0edb8b8f39e33614c2b53ff3b4c1124d0a2e9da53f9ea" +} diff --git a/.sqlx/query-f6395015c3287dbfebcf9f3e0e852fb5320e093d09a42ebcb5b44fdad0860cde.json b/.sqlx/query-f6395015c3287dbfebcf9f3e0e852fb5320e093d09a42ebcb5b44fdad0860cde.json new file mode 100644 index 00000000..482d91c5 --- /dev/null +++ b/.sqlx/query-f6395015c3287dbfebcf9f3e0e852fb5320e093d09a42ebcb5b44fdad0860cde.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles_files\n WHERE file_id = ANY($1::bigint[])\n RETURNING shared_profile_id, file_id\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "shared_profile_id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "file_id", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8Array" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": "f6395015c3287dbfebcf9f3e0e852fb5320e093d09a42ebcb5b44fdad0860cde" +} diff --git a/.sqlx/query-f8f3c599bfcc34817cbb3d08ddf922a8a2bc142d955e450f44aa57a75db61d6f.json b/.sqlx/query-f8f3c599bfcc34817cbb3d08ddf922a8a2bc142d955e450f44aa57a75db61d6f.json new file mode 100644 index 00000000..d4b1347a --- /dev/null +++ b/.sqlx/query-f8f3c599bfcc34817cbb3d08ddf922a8a2bc142d955e450f44aa57a75db61d6f.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "INSERT INTO shared_profiles_users (shared_profile_id, user_id) VALUES ($1, $2)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "f8f3c599bfcc34817cbb3d08ddf922a8a2bc142d955e450f44aa57a75db61d6f" +} diff --git a/.sqlx/query-fb3a5fad9a94c4323446b4752ba426d1398560166b6c9796fe13821b70dd6a78.json b/.sqlx/query-fb3a5fad9a94c4323446b4752ba426d1398560166b6c9796fe13821b70dd6a78.json new file mode 100644 index 00000000..13867556 --- /dev/null +++ b/.sqlx/query-fb3a5fad9a94c4323446b4752ba426d1398560166b6c9796fe13821b70dd6a78.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM shared_profiles\n WHERE id = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [] + }, + "hash": "fb3a5fad9a94c4323446b4752ba426d1398560166b6c9796fe13821b70dd6a78" +} diff --git a/.sqlx/query-ff2c1151935b80022bf5ba7f396a1b2c017465c74c08742cfc0d0858a06bb0d0.json b/.sqlx/query-ff2c1151935b80022bf5ba7f396a1b2c017465c74c08742cfc0d0858a06bb0d0.json new file mode 100644 index 00000000..add17836 
--- /dev/null
+++ b/.sqlx/query-ff2c1151935b80022bf5ba7f396a1b2c017465c74c08742cfc0d0858a06bb0d0.json
@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "UPDATE shared_profiles SET loader_id = $1 WHERE id = $2",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int4",
+        "Int8"
+      ]
+    },
+    "nullable": []
+  },
+  "hash": "ff2c1151935b80022bf5ba7f396a1b2c017465c74c08742cfc0d0858a06bb0d0"
+}
diff --git a/migrations/20231226012200_shared_modpacks.sql b/migrations/20231226012200_shared_modpacks.sql
new file mode 100644
index 00000000..b40883cc
--- /dev/null
+++ b/migrations/20231226012200_shared_modpacks.sql
@@ -0,0 +1,64 @@
+CREATE TABLE shared_profiles (
+    id bigint PRIMARY KEY,
+    name varchar(255) NOT NULL,
+    owner_id bigint NOT NULL,
+    icon_url varchar(255),
+    color integer NULL,
+    updated timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
+    created timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
+
+    loader_id int NOT NULL REFERENCES loaders(id),
+    metadata jsonb NOT NULL DEFAULT '{}'::jsonb,
+
+    game_id int NOT NULL REFERENCES games(id)
+);
+
+CREATE TABLE shared_profiles_links (
+    id bigint PRIMARY KEY, -- id of the shared profile link (doubles as the link identifier)
+    shared_profile_id bigint NOT NULL REFERENCES shared_profiles(id),
+    created timestamptz NOT NULL DEFAULT now(),
+    expires timestamptz NOT NULL
+);
+
+CREATE TABLE shared_profiles_users (
+    shared_profile_id bigint NOT NULL REFERENCES shared_profiles(id),
+    user_id bigint NOT NULL REFERENCES users(id),
+    CONSTRAINT shared_profiles_users_unique UNIQUE (shared_profile_id, user_id)
+);
+
+-- Together, the following two tables comprise the list of files that are part of a shared profile.
+-- for versions we have hosted
+CREATE TABLE shared_profiles_versions (
+    shared_profile_id bigint NOT NULL REFERENCES shared_profiles(id),
+    version_id bigint NULL REFERENCES versions(id) -- for versions
+);
+
+-- for files we host directly
+CREATE TABLE shared_profiles_files (
+    shared_profile_id bigint NOT NULL REFERENCES shared_profiles(id),
+    file_id bigint NOT NULL REFERENCES files(id),
+    install_path varchar(255) NOT NULL
+);
+
+-- Now that files do not necessarily have a version, we create a table to store that relationship
+CREATE TABLE versions_files (
+    version_id bigint NOT NULL REFERENCES versions(id),
+    is_primary boolean NOT NULL DEFAULT false,
+    file_id bigint NOT NULL REFERENCES files(id)
+);
+
+-- Populate with the previously named 'version_id' column of the files table
+INSERT INTO versions_files (version_id, file_id, is_primary)
+-- NOTE: Temporarily disabled due to an unexpected data issue with staging data. Should be enabled before merging, and the issue should be resolved.
+--SELECT version_id, id, is_primary FROM files;
+SELECT v.id, f.id, is_primary FROM files f LEFT JOIN versions v ON f.version_id = v.id WHERE v.id is not null;
+
+
+-- Drop the version_id and is_primary columns from the files table
+ALTER TABLE files DROP COLUMN version_id;
+ALTER TABLE files DROP COLUMN is_primary;
+
+-- Adds a unique index based on the 'algorithm' and 'hash' pair on the hashes table
+-- NOTE: Temporarily disabled due to an unexpected data issue with staging data. Should be enabled before merging, and the issue should be resolved.
+-- In essence, there are hash collisions where there shouldn't be: entire file duplicates where the file url, version_id, etc. are all the same except for the file_id.
+-- CREATE UNIQUE INDEX hashes_algorithm_hash_unique ON hashes (algorithm, hash);
\ No newline at end of file
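The key structural change in this migration: `version_id` and `is_primary` move off `files` and into the new `versions_files` join table, so a file can now belong to a version, to a shared profile (via `shared_profiles_files`), or to both. Every file lookup consequently goes through the join, as in the reworked cached query `query-50e1f713…json` above. A minimal illustrative sketch of the new access pattern (assuming sqlx 0.7 and a live `PgPool`; the helper itself is not part of this changeset):

use sqlx::{PgPool, Row};

// Illustrative only: files attached to a version are now reached through
// the versions_files join table rather than a files.version_id column.
async fn files_for_version(
    pool: &PgPool,
    version_id: i64,
) -> Result<Vec<(String, bool)>, sqlx::Error> {
    let rows = sqlx::query(
        "SELECT f.filename, vf.is_primary
         FROM files f
         INNER JOIN versions_files vf ON vf.file_id = f.id
         WHERE vf.version_id = $1",
    )
    .bind(version_id)
    .fetch_all(pool)
    .await?;

    Ok(rows
        .into_iter()
        .map(|row| (row.get("filename"), row.get("is_primary")))
        .collect())
}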
diff --git a/src/database/models/client_profile_item.rs b/src/database/models/client_profile_item.rs
new file mode 100644
index 00000000..0cd409cc
--- /dev/null
+++ b/src/database/models/client_profile_item.rs
@@ -0,0 +1,623 @@
+use std::collections::HashMap;
+use std::path::PathBuf;
+
+use super::{file_item, ids::*};
+use crate::database::redis::RedisPool;
+use crate::{database::models::DatabaseError, models::projects::FileType};
+use chrono::{DateTime, Utc};
+use dashmap::{DashMap, DashSet};
+use futures::TryStreamExt;
+use serde::{Deserialize, Serialize};
+
+pub const CLIENT_PROFILES_NAMESPACE: &str = "client_profiles";
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct ClientProfile {
+    pub id: ClientProfileId,
+    pub name: String,
+    pub owner_id: UserId,
+    pub icon_url: Option<String>,
+    pub created: DateTime<Utc>,
+    pub updated: DateTime<Utc>,
+
+    pub game_id: GameId,
+    pub game_name: String,
+    pub metadata: ClientProfileMetadata,
+
+    pub users: Vec<UserId>,
+
+    // These represent the same loader
+    pub loader_id: LoaderId,
+    pub loader: String,
+
+    pub versions: Vec<VersionId>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub struct QueryClientProfile {
+    pub inner: ClientProfile,
+    pub links: Vec<ClientProfileLink>,
+    pub override_files: Vec<QueryClientProfileFile>,
+}
+
+#[derive(Clone, Deserialize, Serialize, PartialEq, Eq, Debug)]
+pub struct QueryClientProfileFile {
+    pub id: FileId,
+    pub url: String,
+    pub filename: String,
+    pub hashes: HashMap<String, String>,
+    pub install_path: PathBuf,
+    pub size: u32,
+    pub file_type: Option<FileType>,
+}
+
+#[derive(Clone, Debug, Serialize, Deserialize)]
+pub enum ClientProfileMetadata {
+    Minecraft {
+        loader_version: String,
+        game_version_id: LoaderFieldEnumValueId,
+        // TODO: Currently, we store the game_version directly. If client profiles use more than just Minecraft,
+        // this should change to use a variant of the dynamic loader field system that versions use, and fields like
+        // this would be loaded dynamically from the loader_field_enum_values table.
+        game_version: String,
+    },
+    Unknown,
+}
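Since `metadata` lands in the `shared_profiles.metadata` JSONB column, the stored shape is serde's default externally tagged representation of the enum above. A self-contained sketch of that shape (the enum is mirrored locally and `LoaderFieldEnumValueId` is simplified to `i32` so the snippet compiles on its own; all values are placeholders):

use serde::{Deserialize, Serialize};

// Stand-alone mirror of ClientProfileMetadata, for illustration only.
#[derive(Serialize, Deserialize)]
enum ClientProfileMetadata {
    Minecraft {
        loader_version: String,
        game_version_id: i32, // LoaderFieldEnumValueId, simplified here
        game_version: String,
    },
    Unknown,
}

fn main() {
    let metadata = ClientProfileMetadata::Minecraft {
        loader_version: "0.15.3".to_string(), // placeholder values
        game_version_id: 42,
        game_version: "1.20.4".to_string(),
    };
    // serde's default externally tagged form:
    // {"Minecraft":{"loader_version":"0.15.3","game_version_id":42,"game_version":"1.20.4"}}
    println!("{}", serde_json::to_string(&metadata).unwrap());
}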
+ game_version: String, + }, + Unknown, +} + +impl ClientProfile { + pub async fn insert( + &self, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + ) -> Result<(), DatabaseError> { + let metadata = serde_json::to_value(&self.metadata).map_err(|e| { + DatabaseError::SchemaError(format!("Could not serialize metadata: {}", e)) + })?; + + sqlx::query!( + " + INSERT INTO shared_profiles ( + id, name, owner_id, icon_url, created, updated, + loader_id, game_id, metadata + ) + VALUES ( + $1, $2, $3, $4, $5, $6, + $7, $8, $9 + ) + ", + self.id as ClientProfileId, + self.name, + self.owner_id as UserId, + self.icon_url, + self.created, + self.updated, + self.loader_id as LoaderId, + self.game_id.0, + metadata + ) + .execute(&mut **transaction) + .await?; + + // Insert users + for user_id in &self.users { + sqlx::query!( + " + INSERT INTO shared_profiles_users ( + shared_profile_id, user_id + ) + VALUES ( + $1, $2 + ) + ", + self.id as ClientProfileId, + user_id.0, + ) + .execute(&mut **transaction) + .await?; + } + + // Insert versions + for version_id in &self.versions { + sqlx::query!( + " + INSERT INTO shared_profiles_versions ( + shared_profile_id, version_id + ) + VALUES ( + $1, $2 + ) + ", + self.id as ClientProfileId, + version_id.0, + ) + .execute(&mut **transaction) + .await?; + } + + Ok(()) + } + + // Returns the hashes of the files that were deleted, so they can be deleted from the file host + pub async fn remove( + id: ClientProfileId, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + redis: &RedisPool, + ) -> Result, DatabaseError> { + // Delete shared_profiles_links + sqlx::query!( + " + DELETE FROM shared_profiles_links + WHERE shared_profile_id = $1 + ", + id as ClientProfileId, + ) + .execute(&mut **transaction) + .await?; + + // Delete shared_profiles_users + sqlx::query!( + " + DELETE FROM shared_profiles_users + WHERE shared_profile_id = $1 + ", + id as ClientProfileId, + ) + .execute(&mut **transaction) + .await?; + + // Deletes attached versions + sqlx::query!( + " + DELETE FROM shared_profiles_versions + WHERE shared_profile_id = $1 + ", + id as ClientProfileId, + ) + .execute(&mut **transaction) + .await?; + + // Deletes attached files- we return the hashes so we can delete them from the file host if needed + let deleted_ids = sqlx::query!( + " + DELETE FROM shared_profiles_files + WHERE shared_profile_id = $1 + RETURNING file_id + ", + id as ClientProfileId, + ) + .fetch_all(&mut **transaction) + .await? 
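+        // The rows deleted above may not have been the only references to these
+        // files (a file can now be shared by versions and other profiles), so the
+        // actual file deletion is deferred to remove_unreferenced_files below,
+        // which also returns the hashes needed for file-host cleanup.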
+ .into_iter() + .map(|x| FileId(x.file_id)) + .collect::>(); + + // Check if any versions_files or shared_profiles_files still reference the file- these files should not be deleted + // Delete the files that are not referenced + let removed_hashes = file_item::remove_unreferenced_files(deleted_ids, transaction).await?; + + sqlx::query!( + " + DELETE FROM shared_profiles_links + WHERE shared_profile_id = $1 + ", + id as ClientProfileId, + ) + .execute(&mut **transaction) + .await?; + + sqlx::query!( + " + DELETE FROM shared_profiles + WHERE id = $1 + ", + id as ClientProfileId, + ) + .execute(&mut **transaction) + .await?; + + ClientProfile::clear_cache(id, redis).await?; + + Ok(removed_hashes) + } + + pub async fn get<'a, 'b, E>( + id: ClientProfileId, + executor: E, + redis: &RedisPool, + ) -> Result, DatabaseError> + where + E: sqlx::Acquire<'a, Database = sqlx::Postgres>, + { + Self::get_many(&[id], executor, redis) + .await + .map(|x| x.into_iter().next()) + } + + pub async fn get_ids_for_user<'a, E>( + user_id: UserId, + exec: E, + ) -> Result, DatabaseError> + where + E: sqlx::Acquire<'a, Database = sqlx::Postgres>, + { + let mut exec = exec.acquire().await?; + let db_profiles: Vec = sqlx::query!( + " + SELECT sp.id + FROM shared_profiles sp + LEFT JOIN shared_profiles_users spu ON spu.shared_profile_id = sp.id + WHERE spu.user_id = $1 + ", + user_id.0 + ) + .fetch_many(&mut *exec) + .try_filter_map(|e| async { Ok(e.right().map(|m| ClientProfileId(m.id))) }) + .try_collect::>() + .await?; + Ok(db_profiles) + } + + pub async fn get_many<'a, E>( + ids: &[ClientProfileId], + exec: E, + redis: &RedisPool, + ) -> Result, DatabaseError> + where + E: sqlx::Acquire<'a, Database = sqlx::Postgres>, + { + if ids.is_empty() { + return Ok(Vec::new()); + } + + let mut redis = redis.connect().await?; + let mut exec = exec.acquire().await?; + + let mut found_profiles = Vec::new(); + let mut remaining_ids: Vec = ids.to_vec(); + + if !ids.is_empty() { + let profiles = redis + .multi_get::(CLIENT_PROFILES_NAMESPACE, ids.iter().map(|x| x.0)) + .await?; + for profile in profiles { + if let Some(profile) = + profile.and_then(|x| serde_json::from_str::(&x).ok()) + { + remaining_ids.retain(|x| profile.inner.id != *x); + found_profiles.push(profile); + continue; + } + } + } + + if !remaining_ids.is_empty() { + let shared_profiles_versions: DashMap> = sqlx::query!( + " + SELECT shared_profile_id, version_id + FROM shared_profiles_versions spv + WHERE spv.shared_profile_id = ANY($1) + ", + &remaining_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + let version_id = m.version_id.map(VersionId); + if let Some(version_id) = version_id { + acc.entry(ClientProfileId(m.shared_profile_id)) + .or_default() + .push(version_id); + } + async move { Ok(acc) } + }, + ) + .await?; + + #[derive(Deserialize)] + struct Hash { + pub file_id: FileId, + pub algorithm: String, + pub hash: String, + } + + #[derive(Deserialize)] + struct File { + pub id: FileId, + pub url: String, + pub filename: String, + pub install_path: PathBuf, + pub size: u32, + pub file_type: Option, + } + + let file_ids = DashSet::new(); + let reverse_file_map = DashMap::new(); + let files : DashMap> = sqlx::query!( + " + SELECT DISTINCT shared_profile_id, f.id, f.url, f.filename, spf.install_path, f.size, f.file_type + FROM files f + INNER JOIN shared_profiles_files spf ON spf.file_id = f.id + WHERE spf.shared_profile_id = ANY($1) + ", + &remaining_ids.iter().map(|x| x.0).collect::>() + 
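+            // file_ids and reverse_file_map are populated while folding so the
+            // hashes query below can select by file id and attach each hash row
+            // back to the profile that owns the file.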
).fetch(&mut *exec) + .try_fold(DashMap::new(), |acc : DashMap>, m| { + let file = File { + id: FileId(m.id), + url: m.url, + filename: m.filename, + install_path: m.install_path.into(), + size: m.size as u32, + file_type: m.file_type.map(|x| FileType::from_string(&x)), + }; + + file_ids.insert(FileId(m.id)); + reverse_file_map.insert(FileId(m.id), ClientProfileId(m.shared_profile_id)); + + acc.entry(ClientProfileId(m.shared_profile_id)) + .or_default() + .push(file); + async move { Ok(acc) } + } + ).await?; + + let hashes: DashMap> = sqlx::query!( + " + SELECT DISTINCT file_id, algorithm, encode(hash, 'escape') hash + FROM hashes + WHERE file_id = ANY($1) + ", + &file_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc: DashMap>, m| { + if let Some(found_hash) = m.hash { + let hash = Hash { + file_id: FileId(m.file_id), + algorithm: m.algorithm, + hash: found_hash, + }; + + if let Some(profile_id) = reverse_file_map.get(&FileId(m.file_id)) { + acc.entry(*profile_id).or_default().push(hash); + } + } + async move { Ok(acc) } + }, + ) + .await?; + + let shared_profiles_links: DashMap> = + sqlx::query!( + " + SELECT id, shared_profile_id, created, expires + FROM shared_profiles_links spl + WHERE spl.shared_profile_id = ANY($1) + ", + &remaining_ids.iter().map(|x| x.0).collect::>() + ) + .fetch(&mut *exec) + .try_fold( + DashMap::new(), + |acc_links: DashMap>, m| { + let link = ClientProfileLink { + id: ClientProfileLinkId(m.id), + shared_profile_id: ClientProfileId(m.shared_profile_id), + created: m.created, + expires: m.expires, + }; + acc_links + .entry(ClientProfileId(m.shared_profile_id)) + .or_default() + .push(link); + async move { Ok(acc_links) } + }, + ) + .await?; + + // One to many for shared_profiles to loaders, so can safely group by shared_profile_id + let db_profiles: Vec = sqlx::query!( + r#" + SELECT sp.id, sp.name, sp.owner_id, sp.icon_url, sp.created, sp.updated, sp.loader_id, + l.loader, g.name as game_name, g.id as game_id, sp.metadata, + ARRAY_AGG(DISTINCT spu.user_id) filter (WHERE spu.user_id IS NOT NULL) as users, + ARRAY_AGG(DISTINCT spl.id) filter (WHERE spl.id IS NOT NULL) as links + FROM shared_profiles sp + LEFT JOIN shared_profiles_links spl ON spl.shared_profile_id = sp.id + LEFT JOIN loaders l ON l.id = sp.loader_id + LEFT JOIN shared_profiles_users spu ON spu.shared_profile_id = sp.id + INNER JOIN games g ON g.id = sp.game_id + WHERE sp.id = ANY($1) + GROUP BY sp.id, l.id, g.id + "#, + &remaining_ids.iter().map(|x| x.0).collect::>() + ) + .fetch_many(&mut *exec) + .try_filter_map(|e| async { + Ok(e.right().map(|m| { + let id = ClientProfileId(m.id); + let versions = shared_profiles_versions + .remove(&id) + .map(|(_, x)| x) + .unwrap_or_default(); + let files = files.remove(&id).map(|(_,x)| x).unwrap_or_default(); + let hashes = hashes.remove(&id).map(|x|x.1).unwrap_or_default(); + + let links = shared_profiles_links.remove(&id).map(|x| x.1).unwrap_or_default(); + let game_id = GameId(m.game_id); + let metadata = serde_json::from_value::(m.metadata).unwrap_or(ClientProfileMetadata::Unknown); + let files = files.into_iter().map(|x| { + let mut file_hashes = HashMap::new(); + + for hash in hashes.iter() { + if hash.file_id == x.id { + file_hashes.insert( + hash.algorithm.clone(), + hash.hash.clone(), + ); + } + } + + QueryClientProfileFile { + id: x.id, + url: x.url.clone(), + filename: x.filename.clone(), + hashes: file_hashes, + install_path: x.install_path, + size: x.size, + file_type: x.file_type, + } + 
}).collect::>(); + + QueryClientProfile { + inner: ClientProfile { + id, + name: m.name, + icon_url: m.icon_url, + updated: m.updated, + created: m.created, + owner_id: UserId(m.owner_id), + game_id, + users: m.users.unwrap_or_default().into_iter().map(UserId).collect(), + loader_id: LoaderId(m.loader_id), + game_name: m.game_name, + metadata, + loader: m.loader, + versions, + }, + links, + override_files: files, + } + })) + }) + .try_collect::>() + .await?; + + for profile in db_profiles { + redis + .set_serialized_to_json( + CLIENT_PROFILES_NAMESPACE, + profile.inner.id.0, + &profile, + None, + ) + .await?; + found_profiles.push(profile); + } + } + + Ok(found_profiles) + } + + pub async fn clear_cache(id: ClientProfileId, redis: &RedisPool) -> Result<(), DatabaseError> { + let mut redis = redis.connect().await?; + + redis + .delete_many([(CLIENT_PROFILES_NAMESPACE, Some(id.0.to_string()))]) + .await?; + Ok(()) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +pub struct ClientProfileLink { + pub id: ClientProfileLinkId, + pub shared_profile_id: ClientProfileId, + pub created: DateTime, + pub expires: DateTime, +} + +impl ClientProfileLink { + pub async fn insert( + &self, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + ) -> Result<(), DatabaseError> { + sqlx::query!( + " + INSERT INTO shared_profiles_links ( + id, shared_profile_id, created, expires + ) + VALUES ( + $1, $2, $3, $4 + ) + ", + self.id.0, + self.shared_profile_id.0, + self.created, + self.expires, + ) + .execute(&mut **transaction) + .await?; + + Ok(()) + } + + pub async fn list<'a, 'b, E>( + shared_profile_id: ClientProfileId, + executor: E, + ) -> Result, DatabaseError> + where + E: sqlx::Acquire<'a, Database = sqlx::Postgres>, + { + let mut exec = executor.acquire().await?; + + let links = sqlx::query!( + " + SELECT id, shared_profile_id, created, expires + FROM shared_profiles_links spl + WHERE spl.shared_profile_id = $1 + ", + shared_profile_id.0 + ) + .fetch_many(&mut *exec) + .try_filter_map(|e| async { + Ok(e.right().map(|m| ClientProfileLink { + id: ClientProfileLinkId(m.id), + shared_profile_id: ClientProfileId(m.shared_profile_id), + created: m.created, + expires: m.expires, + })) + }) + .try_collect::>() + .await?; + + Ok(links) + } + + pub async fn get<'a, 'b, E>( + id: ClientProfileLinkId, + executor: E, + ) -> Result, DatabaseError> + where + E: sqlx::Acquire<'a, Database = sqlx::Postgres>, + { + let mut exec = executor.acquire().await?; + + let link = sqlx::query!( + " + SELECT id, shared_profile_id, created, expires + FROM shared_profiles_links spl + WHERE spl.id = $1 + ", + id.0 + ) + .fetch_optional(&mut *exec) + .await? 
+ .map(|m| ClientProfileLink { + id: ClientProfileLinkId(m.id), + shared_profile_id: ClientProfileId(m.shared_profile_id), + created: m.created, + expires: m.expires, + }); + + Ok(link) + } +} + +pub struct ClientProfileOverride { + pub file_hash: String, + pub url: String, + pub install_path: PathBuf, +} diff --git a/src/database/models/file_item.rs b/src/database/models/file_item.rs new file mode 100644 index 00000000..2bc090f8 --- /dev/null +++ b/src/database/models/file_item.rs @@ -0,0 +1,345 @@ +use std::{collections::HashMap, path::PathBuf}; + +use itertools::Itertools; + +use crate::{ + database::{models::VersionId, redis::RedisPool}, + models::{self, projects::FileType}, + routes::CommonError, +}; + +use super::{client_profile_item, generate_file_id, ClientProfileId, DatabaseError, FileId}; + +#[derive(Clone, Debug)] +pub struct VersionFileBuilder { + pub url: String, + pub filename: String, + pub hashes: Vec, + pub primary: bool, + // Whether a new file should be generated or an existing one should be used + // If one is provided, that file will be connected to the version instead of creating a new one + // This is used on rare allowable hash collisions, such as two unapproved versions + // No two approved versions should ever have the same file- this is enforced elsewhere + pub existing_file: Option, + pub size: u32, + pub file_type: Option, +} + +#[derive(Clone, Debug)] +pub struct HashBuilder { + pub algorithm: String, + pub hash: Vec, +} + +impl VersionFileBuilder { + pub async fn insert( + self, + version_id: VersionId, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + ) -> Result { + let file_id = if let Some(file_id) = self.existing_file { + file_id + } else { + let file_id = generate_file_id(&mut *transaction).await?; + + sqlx::query!( + " + INSERT INTO files (id, url, filename, size, file_type) + VALUES ($1, $2, $3, $4, $5) + ", + file_id as FileId, + self.url, + self.filename, + self.size as i32, + self.file_type.map(|x| x.as_str()), + ) + .execute(&mut **transaction) + .await?; + + for hash in self.hashes { + sqlx::query!( + " + INSERT INTO hashes (file_id, algorithm, hash) + VALUES ($1, $2, $3) + ", + file_id as FileId, + hash.algorithm, + hash.hash, + ) + .execute(&mut **transaction) + .await?; + } + + file_id + }; + + sqlx::query!( + " + INSERT INTO versions_files (version_id, file_id, is_primary) + VALUES ($1, $2, $3) + ", + version_id as VersionId, + file_id as FileId, + self.primary, + ) + .execute(&mut **transaction) + .await?; + + Ok(file_id) + } +} + +#[derive(Clone, Debug)] +pub struct ClientProfileFileBuilder { + pub url: String, + pub filename: String, + pub hashes: Vec, + pub install_path: PathBuf, + // Whether a new file should be generated or an existing one should be used + // If one is provided, that file will be connected to the profile instead of creating a new one + pub existing_file: Option, + pub size: u32, + pub file_type: Option, +} + +impl ClientProfileFileBuilder { + pub async fn insert( + self, + profile_id: ClientProfileId, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + ) -> Result { + let file_id = if let Some(file_id) = self.existing_file { + file_id + } else { + let file_id = generate_file_id(&mut *transaction).await?; + + sqlx::query!( + " + INSERT INTO files (id, url, filename, size, file_type) + VALUES ($1, $2, $3, $4, $5) + ", + file_id as FileId, + self.url, + self.filename, + self.size as i32, + self.file_type.map(|x| x.as_str()), + ) + .execute(&mut **transaction) + .await?; + + for hash in self.hashes { + 
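+            // As in VersionFileBuilder::insert above, each digest is stored as a
+            // raw bytea row keyed by the new file id; readers decode it later with
+            // encode(hash, 'escape').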
sqlx::query!( + " + INSERT INTO hashes (file_id, algorithm, hash) + VALUES ($1, $2, $3) + ", + file_id as FileId, + hash.algorithm, + hash.hash, + ) + .execute(&mut **transaction) + .await?; + } + + file_id + }; + + sqlx::query!( + " + INSERT INTO shared_profiles_files (shared_profile_id, file_id, install_path) + VALUES ($1, $2, $3) + ", + profile_id as ClientProfileId, + file_id as FileId, + self.install_path.to_string_lossy().to_string(), + ) + .execute(&mut **transaction) + .await?; + + Ok(file_id) + } +} + +// Remove files that are not referenced by any versions_files or shared_profiles_files +// This is a separate function because it is used in multiple places +// Returns a list of hashes that were deleted, so they can be removed from the file host +pub async fn remove_unreferenced_files( + file_ids: Vec, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, +) -> Result, DatabaseError> { + let file_ids = file_ids.into_iter().map(|x| x.0).collect::>(); + + // Check if any versions_files or shared_profiles_files still reference the file- these files should not be deleted + let referenced_files = sqlx::query!( + " + SELECT f.id + FROM files f + LEFT JOIN versions_files vf ON vf.file_id = f.id + LEFT JOIN shared_profiles_files spf ON spf.file_id = f.id + WHERE f.id = ANY($1) AND (vf.version_id IS NOT NULL OR spf.shared_profile_id IS NOT NULL) + ", + &file_ids[..], + ) + .fetch_all(&mut **transaction) + .await? + .into_iter() + .filter_map(|x| x.id) + .collect::>(); + + // Filter out the referenced files + let file_ids = file_ids + .into_iter() + .filter(|x| !referenced_files.contains(x)) + .collect::>(); + + // Delete hashes for the files remaining + let hashes: Vec = sqlx::query!( + " + DELETE FROM hashes + WHERE EXISTS( + SELECT 1 FROM files WHERE + (files.id = ANY($1) AND hashes.file_id = files.id) + ) + RETURNING encode(hashes.hash, 'escape') hash + ", + &file_ids[..], + ) + .fetch_all(&mut **transaction) + .await? + .into_iter() + .filter_map(|x| x.hash) + .collect::>(); + + // Delete files remaining + sqlx::query!( + " + DELETE FROM files + WHERE files.id = ANY($1) + ", + &file_ids[..], + ) + .execute(&mut **transaction) + .await?; + + Ok(hashes) +} + +// Converts shared_profiles_files to shared_profiles_versions for cases of +// hash collisions for files that versions now 'own'. +// It also ensures that all files have at exactly one approved version- the one that was just approved. +// It returns a schema error if any file has multiple approved versions (reverting the transaction) +// (Before they are approved, uploaded files can have hash collections) +// This is a separate function because it is used in multiple places. +pub async fn convert_hash_collisions_to_versions( + approved_version_ids: &[VersionId], + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, + redis: &RedisPool, +) -> Result<(), T> +where + T: CommonError + From + From, +{ + // First, get all file id associated with these versions + let file_ids: HashMap = sqlx::query!( + " + SELECT version_id, file_id + FROM versions_files + WHERE version_id = ANY($1) + ", + &approved_version_ids.iter().map(|x| x.0).collect::>()[..], + ) + .fetch_all(&mut **transaction) + .await? 
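+    // Maps each file id to the single newly-approved version that now owns it.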
+ .into_iter() + .map(|x| (FileId(x.file_id), VersionId(x.version_id))) + .collect(); + + // For each file, get all approved project's versions that have that file + let existing_approved_versions: HashMap> = sqlx::query!( + " + SELECT version_id, file_id + FROM versions_files vf + LEFT JOIN versions v ON v.id = vf.version_id + LEFT JOIN mods m ON m.id = v.mod_id + WHERE m.status = ANY($1) AND file_id = ANY($2::bigint[]) + ", + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| x.is_approved()) + .map(|x| x.to_string()) + .collect::>(), + &file_ids.keys().map(|x| x.0).collect::>()[..], + ) + .fetch_all(&mut **transaction) + .await? + .into_iter() + .map(|x| (FileId(x.file_id), VersionId(x.version_id))) + .into_group_map(); + + // Ensure that all files have at exactly one approved version- the one that was just approved + for (file_id, version_ids) in existing_approved_versions { + let Some(intended_version_id) = file_ids.get(&file_id) else { + continue; + }; + + if version_ids.len() != 1 || !version_ids.contains(intended_version_id) { + let versions: Vec = + version_ids.iter().map(|x| (*x).into()).collect(); + return Err(T::invalid_input(format!( + "File {} has existing or multiple approved versions: {}", + file_id.0, + versions.into_iter().join(", ") + ))); + } + } + + // Delete all shared_profiles_files that reference these files + let shared_profile_ids: Vec<(ClientProfileId, FileId)> = sqlx::query!( + " + DELETE FROM shared_profiles_files + WHERE file_id = ANY($1::bigint[]) + RETURNING shared_profile_id, file_id + ", + &file_ids.keys().map(|x| x.0).collect::>()[..], + ) + .fetch_all(&mut **transaction) + .await? + .into_iter() + .map(|x| (ClientProfileId(x.shared_profile_id), FileId(x.file_id))) + .collect(); + + // Add as versions + let versions_to_add: Vec<(ClientProfileId, VersionId)> = shared_profile_ids + .into_iter() + .filter_map(|(profile_id, file_id)| file_ids.get(&file_id).map(|x| (profile_id, *x))) + .collect(); + let (client_profile_ids, version_ids): (Vec<_>, Vec<_>) = + versions_to_add.iter().map(|x| (x.0 .0, x.1 .0)).unzip(); + sqlx::query!( + " + INSERT INTO shared_profiles_versions (shared_profile_id, version_id) + SELECT * FROM UNNEST($1::bigint[], $2::bigint[]) + ", + &client_profile_ids[..], + &version_ids[..], + ) + .execute(&mut **transaction) + .await?; + + // Set updated of all hit profiles + sqlx::query!( + " + UPDATE shared_profiles + SET updated = NOW() + WHERE id = ANY($1::bigint[]) + ", + &client_profile_ids[..], + ) + .execute(&mut **transaction) + .await?; + + // Clear cache of all hit profiles + for profile_id in client_profile_ids { + client_profile_item::ClientProfile::clear_cache(ClientProfileId(profile_id), redis).await?; + } + + Ok(()) +} diff --git a/src/database/models/ids.rs b/src/database/models/ids.rs index d7e4a97a..f488a8bf 100644 --- a/src/database/models/ids.rs +++ b/src/database/models/ids.rs @@ -192,6 +192,22 @@ generate_ids!( PayoutId ); +generate_ids!( + pub generate_client_profile_id, + ClientProfileId, + 8, + "SELECT EXISTS(SELECT 1 FROM shared_profiles WHERE id=$1)", + ClientProfileId +); + +generate_ids!( + pub generate_client_profile_link_id, + ClientProfileLinkId, + 8, + "SELECT EXISTS(SELECT 1 FROM shared_profiles_links WHERE id=$1)", + ClientProfileLinkId +); + #[derive(Copy, Clone, Debug, PartialEq, Eq, Type, Hash, Serialize, Deserialize)] #[sqlx(transparent)] pub struct UserId(pub i64); @@ -311,6 +327,14 @@ pub struct OAuthAccessTokenId(pub i64); #[sqlx(transparent)] pub struct PayoutId(pub i64); 
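+// A round-trip sketch of how these database-side ids relate to the API-facing
+// ids (illustrative values; the From impls added at the bottom of this file do
+// the conversions):
+//
+//     let db_id = ClientProfileId(42);                  // database-side i64
+//     let api_id: ids::ClientProfileId = db_id.into();  // API-side u64, base62-encoded in JSON
+//     assert_eq!(ClientProfileId::from(api_id), db_id); // and back again
+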
+#[derive(Copy, Clone, Debug, Type, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[sqlx(transparent)] +pub struct ClientProfileId(pub i64); + +#[derive(Copy, Clone, Debug, Type, Serialize, Deserialize, Eq, PartialEq, Hash)] +#[sqlx(transparent)] +pub struct ClientProfileLinkId(pub i64); + use crate::models::ids; impl From for ProjectId { @@ -464,3 +488,25 @@ impl From for ids::PayoutId { ids::PayoutId(id.0 as u64) } } + +impl From for ClientProfileId { + fn from(id: ids::ClientProfileId) -> Self { + ClientProfileId(id.0 as i64) + } +} + +impl From for ids::ClientProfileId { + fn from(id: ClientProfileId) -> Self { + ids::ClientProfileId(id.0 as u64) + } +} +impl From for ClientProfileLinkId { + fn from(id: ids::ClientProfileLinkId) -> Self { + ClientProfileLinkId(id.0 as i64) + } +} +impl From for ids::ClientProfileLinkId { + fn from(id: ClientProfileLinkId) -> Self { + ids::ClientProfileLinkId(id.0 as u64) + } +} diff --git a/src/database/models/mod.rs b/src/database/models/mod.rs index eb931f7d..729f572a 100644 --- a/src/database/models/mod.rs +++ b/src/database/models/mod.rs @@ -1,7 +1,9 @@ use thiserror::Error; pub mod categories; +pub mod client_profile_item; pub mod collection_item; +pub mod file_item; pub mod flow_item; pub mod ids; pub mod image_item; diff --git a/src/database/models/version_item.rs b/src/database/models/version_item.rs index eeb6a965..35657cee 100644 --- a/src/database/models/version_item.rs +++ b/src/database/models/version_item.rs @@ -1,6 +1,7 @@ -use super::ids::*; +use super::file_item::VersionFileBuilder; use super::loader_fields::VersionField; use super::DatabaseError; +use super::{file_item, ids::*}; use crate::database::models::loader_fields::{ QueryLoaderField, QueryLoaderFieldEnumValue, QueryVersionField, }; @@ -115,64 +116,6 @@ impl DependencyBuilder { } } -#[derive(Clone, Debug)] -pub struct VersionFileBuilder { - pub url: String, - pub filename: String, - pub hashes: Vec, - pub primary: bool, - pub size: u32, - pub file_type: Option, -} - -impl VersionFileBuilder { - pub async fn insert( - self, - version_id: VersionId, - transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, - ) -> Result { - let file_id = generate_file_id(&mut *transaction).await?; - - sqlx::query!( - " - INSERT INTO files (id, version_id, url, filename, is_primary, size, file_type) - VALUES ($1, $2, $3, $4, $5, $6, $7) - ", - file_id as FileId, - version_id as VersionId, - self.url, - self.filename, - self.primary, - self.size as i32, - self.file_type.map(|x| x.as_str()), - ) - .execute(&mut **transaction) - .await?; - - for hash in self.hashes { - sqlx::query!( - " - INSERT INTO hashes (file_id, algorithm, hash) - VALUES ($1, $2, $3) - ", - file_id as FileId, - hash.algorithm, - hash.hash, - ) - .execute(&mut **transaction) - .await?; - } - - Ok(file_id) - } -} - -#[derive(Clone, Debug)] -pub struct HashBuilder { - pub algorithm: String, - pub hash: Vec, -} - impl VersionBuilder { pub async fn insert( self, @@ -362,29 +305,23 @@ impl Version { .execute(&mut **transaction) .await?; - sqlx::query!( + let files = sqlx::query!( " - DELETE FROM hashes - WHERE EXISTS( - SELECT 1 FROM files WHERE - (files.version_id = $1) AND - (hashes.file_id = files.id) - ) - ", - id as VersionId - ) - .execute(&mut **transaction) - .await?; - - sqlx::query!( - " - DELETE FROM files - WHERE files.version_id = $1 + DELETE FROM versions_files + WHERE versions_files.version_id = $1 + RETURNING file_id ", id as VersionId, ) - .execute(&mut **transaction) - .await?; + .fetch_all(&mut **transaction) + 
.await? + .into_iter() + .map(|x| FileId(x.file_id)) + .collect::>(); + + // Check if any versions_files or shared_profiles_files still reference the file- these files should not be deleted + // Delete the files that are not referenced + file_item::remove_unreferenced_files(files, transaction).await?; // Sync dependencies @@ -658,9 +595,10 @@ impl Version { let reverse_file_map = DashMap::new(); let files : DashMap> = sqlx::query!( " - SELECT DISTINCT version_id, f.id, f.url, f.filename, f.is_primary, f.size, f.file_type + SELECT DISTINCT vf.version_id, f.id, f.url, f.filename, vf.is_primary, f.size, f.file_type FROM files f - WHERE f.version_id = ANY($1) + INNER JOIN versions_files vf ON vf.file_id = f.id + WHERE vf.version_id = ANY($1) ", &version_ids_parsed ).fetch(&mut *exec) @@ -793,7 +731,7 @@ impl Version { } } - QueryFile { + QueryVersionFile { id: x.id, url: x.url.clone(), filename: x.filename.clone(), @@ -904,17 +842,23 @@ impl Version { if !file_ids_parsed.is_empty() { let db_files: Vec = sqlx::query!( " - SELECT f.id, f.version_id, v.mod_id, f.url, f.filename, f.is_primary, f.size, f.file_type, + SELECT f.id, vf.version_id, v.mod_id, f.url, f.filename, vf.is_primary, f.size, f.file_type, JSONB_AGG(DISTINCT jsonb_build_object('algorithm', h.algorithm, 'hash', encode(h.hash, 'escape'))) filter (where h.hash is not null) hashes FROM files f - INNER JOIN versions v on v.id = f.version_id + INNER JOIN versions_files vf on vf.file_id = f.id + INNER JOIN versions v on v.id = vf.version_id + INNER JOIN mods m on m.id = v.mod_id AND m.status = ANY($3) INNER JOIN hashes h on h.file_id = f.id WHERE h.algorithm = $1 AND h.hash = ANY($2) - GROUP BY f.id, v.mod_id, v.date_published + GROUP BY f.id, v.mod_id, v.date_published, vf.version_id, vf.is_primary ORDER BY v.date_published ", algorithm, &file_ids_parsed.into_iter().map(|x| x.as_bytes().to_vec()).collect::>(), + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| x.is_approved()) + .map(|x| x.to_string()) + .collect::>(), ) .fetch_many(executor) .try_filter_map(|e| async { @@ -997,7 +941,7 @@ impl Version { pub struct QueryVersion { pub inner: Version, - pub files: Vec, + pub files: Vec, pub version_fields: Vec, pub loaders: Vec, pub project_types: Vec, @@ -1014,7 +958,7 @@ pub struct QueryDependency { } #[derive(Clone, Deserialize, Serialize, PartialEq, Eq)] -pub struct QueryFile { +pub struct QueryVersionFile { pub id: FileId, pub url: String, pub filename: String, diff --git a/src/models/mod.rs b/src/models/mod.rs index b1a12c9b..6b8dda7a 100644 --- a/src/models/mod.rs +++ b/src/models/mod.rs @@ -3,6 +3,7 @@ pub mod v2; pub mod v3; pub use v3::analytics; +pub use v3::client; pub use v3::collections; pub use v3::ids; pub use v3::images; diff --git a/src/models/v3/client/mod.rs b/src/models/v3/client/mod.rs new file mode 100644 index 00000000..6b76aba6 --- /dev/null +++ b/src/models/v3/client/mod.rs @@ -0,0 +1 @@ +pub mod profile; diff --git a/src/models/v3/client/profile.rs b/src/models/v3/client/profile.rs new file mode 100644 index 00000000..dd3fdb07 --- /dev/null +++ b/src/models/v3/client/profile.rs @@ -0,0 +1,132 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; + +use crate::{ + database, + models::ids::{Base62Id, UserId}, +}; + +/// The ID of a specific profile, encoded as base62 for usage in the API +#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)] +#[serde(from = "Base62Id")] +#[serde(into = "Base62Id")] +pub struct ClientProfileId(pub u64); + +/// The ID of a specific 
profile link, encoded as base62 for usage in the API
+#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
+#[serde(from = "Base62Id")]
+#[serde(into = "Base62Id")]
+pub struct ClientProfileLinkId(pub u64);
+
+/// A client profile returned from the API
+#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)]
+pub struct ClientProfile {
+    /// The ID of the profile, encoded as a base62 string.
+    pub id: ClientProfileId,
+
+    /// The person that has ownership of this profile.
+    pub owner_id: UserId,
+    /// The title or name of the profile.
+    pub name: String,
+    /// The date at which the profile was first created.
+    pub created: DateTime<Utc>,
+    /// The date at which the profile was last updated (versions/overrides were added/removed)
+    pub updated: DateTime<Utc>,
+    /// The icon of the profile.
+    pub icon_url: Option<String>,
+
+    /// The loader
+    pub loader: String,
+
+    /// Game-specific information
+    #[serde(flatten)]
+    pub game: ClientProfileMetadata,
+
+    // The following fields are hidden if the user is not the owner
+    /// The share links for this profile
+    pub share_links: Option<Vec<ClientProfileShareLink>>,
+    // Users that are associated with this profile
+    pub users: Option<Vec<UserId>>,
+}
+
+#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)]
+#[serde(tag = "game")]
+pub enum ClientProfileMetadata {
+    #[serde(rename = "minecraft-java")]
+    Minecraft {
+        /// Client game version
+        game_version: String,
+        /// Loader version
+        loader_version: String,
+    },
+    #[serde(rename = "unknown")]
+    Unknown,
+}
+
+impl From<database::models::client_profile_item::ClientProfileMetadata> for ClientProfileMetadata {
+    fn from(game: database::models::client_profile_item::ClientProfileMetadata) -> Self {
+        match game {
+            database::models::client_profile_item::ClientProfileMetadata::Minecraft {
+                loader_version,
+                game_version,
+                ..
+            } => Self::Minecraft {
+                loader_version,
+                game_version,
+            },
+            database::models::client_profile_item::ClientProfileMetadata::Unknown { ..
} => { + Self::Unknown + } + } + } +} + +impl ClientProfile { + pub fn from( + profile: database::models::client_profile_item::QueryClientProfile, + current_user_id: Option, + ) -> Self { + let mut users = None; + let mut share_links = None; + if Some(profile.inner.owner_id) == current_user_id { + users = Some(profile.inner.users.into_iter().map(|v| v.into()).collect()); + share_links = Some(profile.links.into_iter().map(|v| v.into()).collect()); + }; + + Self { + id: profile.inner.id.into(), + owner_id: profile.inner.owner_id.into(), + name: profile.inner.name, + created: profile.inner.created, + updated: profile.inner.updated, + icon_url: profile.inner.icon_url, + users, + loader: profile.inner.loader, + game: profile.inner.metadata.into(), + share_links, + } + } +} + +#[derive(Serialize, Deserialize, Clone, PartialEq, Eq, Debug)] +pub struct ClientProfileShareLink { + pub id: ClientProfileLinkId, // The url identifier, encoded as base62 + pub profile_id: ClientProfileId, + pub created: DateTime, + pub expires: DateTime, +} + +impl From for ClientProfileShareLink { + fn from(link: database::models::client_profile_item::ClientProfileLink) -> Self { + // Generate URL for easy access + let profile_id: ClientProfileId = link.shared_profile_id.into(); + let link_id: ClientProfileLinkId = link.id.into(); + + Self { + id: link_id, + profile_id, + created: link.created, + expires: link.expires, + } + } +} diff --git a/src/models/v3/ids.rs b/src/models/v3/ids.rs index 73d0c32c..ae87563e 100644 --- a/src/models/v3/ids.rs +++ b/src/models/v3/ids.rs @@ -1,5 +1,7 @@ use thiserror::Error; +pub use super::client::profile::ClientProfileId; +pub use super::client::profile::ClientProfileLinkId; pub use super::collections::CollectionId; pub use super::images::ImageId; pub use super::notifications::NotificationId; @@ -129,6 +131,8 @@ base62_id_impl!(OAuthClientId, OAuthClientId); base62_id_impl!(OAuthRedirectUriId, OAuthRedirectUriId); base62_id_impl!(OAuthClientAuthorizationId, OAuthClientAuthorizationId); base62_id_impl!(PayoutId, PayoutId); +base62_id_impl!(ClientProfileId, ClientProfileId); +base62_id_impl!(ClientProfileLinkId, ClientProfileLinkId); pub mod base62_impl { use serde::de::{self, Deserializer, Visitor}; diff --git a/src/models/v3/mod.rs b/src/models/v3/mod.rs index 34f5836b..fdcc8af8 100644 --- a/src/models/v3/mod.rs +++ b/src/models/v3/mod.rs @@ -1,4 +1,5 @@ pub mod analytics; +pub mod client; pub mod collections; pub mod ids; pub mod images; diff --git a/src/models/v3/pats.rs b/src/models/v3/pats.rs index d4ef6e28..685edf15 100644 --- a/src/models/v3/pats.rs +++ b/src/models/v3/pats.rs @@ -106,6 +106,14 @@ bitflags::bitflags! 
{ // only accessible by modrinth-issued sessions const SESSION_ACCESS = 1 << 39; + // create a client profile + const CLIENT_PROFILE_CREATE = 1 << 40; + // edit a client profile + const CLIENT_PROFILE_WRITE = 1 << 41; + // download a client profile + const CLIENT_PROFILE_DOWNLOAD = 1 << 42; + + const NONE = 0b0; } } diff --git a/src/routes/internal/admin.rs b/src/routes/internal/admin.rs index 07afe836..d082d108 100644 --- a/src/routes/internal/admin.rs +++ b/src/routes/internal/admin.rs @@ -66,8 +66,10 @@ pub async fn count_download( let (version_id, project_id) = if let Some(version) = sqlx::query!( " - SELECT v.id id, v.mod_id mod_id FROM files f - INNER JOIN versions v ON v.id = f.version_id + SELECT v.id id, v.mod_id mod_id + FROM files f + INNER JOIN versions_files vf ON vf.file_id = f.id + INNER JOIN versions v ON v.id = vf.version_id WHERE f.url = $1 ", download_body.url, diff --git a/src/routes/internal/client/mod.rs b/src/routes/internal/client/mod.rs new file mode 100644 index 00000000..0a9c08b6 --- /dev/null +++ b/src/routes/internal/client/mod.rs @@ -0,0 +1 @@ +pub mod profiles; diff --git a/src/routes/internal/client/profiles.rs b/src/routes/internal/client/profiles.rs new file mode 100644 index 00000000..e8815ca2 --- /dev/null +++ b/src/routes/internal/client/profiles.rs @@ -0,0 +1,1521 @@ +use crate::auth::checks::filter_visible_version_ids; +use crate::auth::{get_user_from_headers, AuthenticationError}; +use crate::database::models::file_item::ClientProfileFileBuilder; +use crate::database::models::legacy_loader_fields::MinecraftGameVersion; +use crate::database::models::{ + client_profile_item, file_item, generate_client_profile_id, generate_client_profile_link_id, + version_item, FileId, +}; +use crate::database::redis::RedisPool; +use crate::file_hosting::FileHost; +use crate::models::client::profile::{ + ClientProfile, ClientProfileId, ClientProfileLinkId, ClientProfileShareLink, +}; +use crate::models::ids::base62_impl::parse_base62; +use crate::models::ids::{UserId, VersionId}; +use crate::models::pats::Scopes; +use crate::models::projects::FileType; +use crate::queue::session::AuthQueue; +use crate::routes::v3::project_creation::{CreateError, UploadedFile}; +use crate::routes::v3::version_creation::get_name_ext; +use crate::routes::v3::version_file::default_algorithm_from_hashes; +use crate::routes::ApiError; +use crate::util::routes::{read_from_field, read_from_payload}; +use crate::util::validate::validation_errors_to_string; +use crate::{database, models}; +use actix_multipart::{Field, Multipart}; +use actix_web::web::Data; +use actix_web::{web, HttpRequest, HttpResponse}; +use chrono::Utc; +use database::models::ids::ClientProfileId as DBClientProfileId; +use futures::StreamExt; +use itertools::Itertools; +use rand::distributions::Alphanumeric; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha20Rng; +use serde::{Deserialize, Serialize}; +use sha2::Digest; +use sqlx::PgPool; +use std::collections::HashMap; +use std::path::PathBuf; +use std::sync::Arc; +use validator::Validate; + +pub fn config(cfg: &mut web::ServiceConfig) { + cfg.service( + web::scope("client") + .route("profile", web::post().to(profile_create)) + .route("profiles", web::get().to(profiles_get)) + .route("user", web::get().to(user_profiles_get)) + .route("check_token", web::get().to(profile_token_check)) + .service( + web::scope("share") + .route("{id}", web::get().to(profile_get_share_link)) + .route("{id}/accept", web::post().to(accept_share_link)), + ) + .service( + web::scope("profile") 
+ .route("{id}", web::get().to(profile_get)) + .route("{id}", web::patch().to(profile_edit)) + .route("{id}", web::delete().to(profile_delete)) + .route("{id}/files", web::get().to(profile_files)) + .route("{id}/override", web::post().to(client_profile_add_override)) + .route( + "{id}/override", + web::delete().to(client_profile_remove_overrides), + ) + .route("{id}/share", web::post().to(profile_share)) + .route("{id}/icon", web::patch().to(profile_icon_edit)) + .route("{id}/icon", web::delete().to(delete_profile_icon)), + ), + ); +} + +#[derive(Serialize, Deserialize, Validate, Clone)] +pub struct ProfileCreateData { + #[validate( + length(min = 3, max = 64), + custom(function = "crate::util::validate::validate_name") + )] + /// The title or name of the profile. + pub name: String, + // The loader string (parsed to a loader) + pub loader: String, + // The loader version + pub loader_version: String, + // The list of versions to include in the profile (does not include overrides) + pub versions: Vec, + + #[serde(flatten)] + pub game: ProfileCreateDataGame, +} + +#[derive(Serialize, Deserialize, Clone)] +#[serde(tag = "game")] +pub enum ProfileCreateDataGame { + #[serde(rename = "minecraft-java")] + MinecraftJava { + // The game version string (parsed to a game version) + game_version: String, + }, +} + +// Create a new client profile +pub async fn profile_create( + req: HttpRequest, + profile_create_data: web::Json, + client: Data, + redis: Data, + session_queue: Data, +) -> Result { + let profile_create_data = profile_create_data.into_inner(); + // The currently logged in user + let current_user = get_user_from_headers( + &req, + &**client, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_CREATE]), + ) + .await? + .1; + + profile_create_data + .validate() + .map_err(|err| CreateError::InvalidInput(validation_errors_to_string(err, None)))?; + + let game_id; + let game_name; + let game: client_profile_item::ClientProfileMetadata = match profile_create_data.game { + ProfileCreateDataGame::MinecraftJava { game_version } => { + let game = database::models::loader_fields::Game::get_slug( + "minecraft-java", + &**client, + &redis, + ) + .await? + .ok_or_else(|| CreateError::InvalidInput("Invalid Client game".to_string()))?; + + game_id = game.id; + game_name = game.name; + + let game_version_id = MinecraftGameVersion::list(None, None, &**client, &redis) + .await? + .into_iter() + .find(|x| x.version == game_version) + .ok_or_else(|| { + CreateError::InvalidInput("Invalid Client game version".to_string()) + })? + .id; + + client_profile_item::ClientProfileMetadata::Minecraft { + loader_version: profile_create_data.loader_version, + game_version_id, + game_version, + } + } + }; + + let loader_id = database::models::loader_fields::Loader::get_id( + &profile_create_data.loader, + &**client, + &redis, + ) + .await? + .ok_or_else(|| CreateError::InvalidInput("Invalid loader".to_string()))?; + + let mut transaction = client.begin().await?; + + let profile_id: database::models::ClientProfileId = + generate_client_profile_id(&mut transaction).await?; + + let version_ids = profile_create_data + .versions + .into_iter() + .map(|x| x.into()) + .collect::>(); + let versions = version_item::Version::get_many(&version_ids, &**client, &redis) + .await? 
+ .into_iter() + .map(|x| x.inner) + .collect::>(); + + // Filters versions authorized to see + let versions = filter_visible_version_ids( + versions.iter().collect_vec(), + &Some(current_user.clone()), + &client, + &redis, + ) + .await + .map_err(|_| CreateError::InvalidInput("Could not fetch submitted version ids".to_string()))?; + + let profile_builder_actual = client_profile_item::ClientProfile { + id: profile_id, + name: profile_create_data.name.clone(), + owner_id: current_user.id.into(), + icon_url: None, + created: Utc::now(), + updated: Utc::now(), + metadata: game, + game_id, + game_name, + loader_id, + loader: profile_create_data.loader, + users: vec![current_user.id.into()], + versions, + }; + let profile_builder = profile_builder_actual.clone(); + profile_builder_actual.insert(&mut transaction).await?; + transaction.commit().await?; + + let profile = client_profile_item::QueryClientProfile { + inner: profile_builder, + links: Vec::new(), + override_files: Vec::new(), + }; + + let profile = + models::client::profile::ClientProfile::from(profile, Some(current_user.id.into())); + Ok(HttpResponse::Ok().json(profile)) +} + +#[derive(Serialize, Deserialize)] +pub struct ClientProfileIds { + pub ids: String, +} +// Get several client profiles by their ids +pub async fn profiles_get( + req: HttpRequest, + web::Query(ids): web::Query, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let user_id = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + None, // No scopes required to read your own links + ) + .await + .ok() + .map(|x| x.1.id.into()); + + // In addition, private information (ie: CDN links, tokens, anything outside of the list of hosted versions and install paths) is not returned + let ids = serde_json::from_str::>(&ids.ids)?; + let ids = ids + .into_iter() + .map(|x| parse_base62(x).map(|x| database::models::ClientProfileId(x as i64))) + .collect::, _>>()?; + + let profiles_data = + database::models::client_profile_item::ClientProfile::get_many(&ids, &**pool, &redis) + .await?; + let profiles = profiles_data + .into_iter() + .map(|x| ClientProfile::from(x, user_id)) + .collect::>(); + + Ok(HttpResponse::Ok().json(profiles)) +} + +// Get all a user's client profiles +pub async fn user_profiles_get( + req: HttpRequest, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let user_id = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + None, // No scopes required to read your own links + ) + .await + .ok() + .map(|x| x.1.id.into()); + + let profile_ids = database::models::client_profile_item::ClientProfile::get_ids_for_user( + user_id.unwrap(), + &**pool, + ) + .await?; + let profiles_data = database::models::client_profile_item::ClientProfile::get_many( + &profile_ids, + &**pool, + &redis, + ) + .await?; + let profiles = profiles_data + .into_iter() + .map(|x| ClientProfile::from(x, user_id)) + .collect::>(); + + Ok(HttpResponse::Ok().json(profiles)) +} + +// Get a client profile by its id +pub async fn profile_get( + req: HttpRequest, + info: web::Path<(String,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let string = info.into_inner().0; + + let user_id = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + None, // No scopes required to read your own links + ) + .await + .ok() + .map(|x| x.1.id.into()); + + // No user check ,as any user/scope can view profiles. 
+ // In addition, private information (ie: CDN links, tokens, anything outside of the list of hosted versions and install paths) is not returned + let id = database::models::ClientProfileId(parse_base62(&string)? as i64); + let profile_data = + database::models::client_profile_item::ClientProfile::get(id, &**pool, &redis).await?; + if let Some(data) = profile_data { + return Ok(HttpResponse::Ok().json(ClientProfile::from(data, user_id))); + } + Err(ApiError::NotFound) +} + +#[derive(Serialize, Deserialize, Validate, Clone)] +pub struct EditClientProfile { + #[validate( + length(min = 3, max = 64), + custom(function = "crate::util::validate::validate_name") + )] + /// The title or name of the profile. + pub name: Option, + #[validate( + custom(function = "crate::util::validate::validate_url"), + length(max = 255) + )] + // The loader string (parsed to a loader) + pub loader: Option, + // The list of versions to include in the profile (does not include overrides) + pub versions: Option>, + // You can remove users from your invite list here + pub remove_users: Option>, + // You can remove share links here (by id) + pub remove_links: Option>, + + // As these fields affect metadata but do not yet use the 'loader_fields' system, + // we simply list them here and compare them to the existing metadata. + // The loader version + pub loader_version: Option, + // The game version string (parsed to a game version) + pub game_version: Option, +} + +// Edit a client profile +pub async fn profile_edit( + req: HttpRequest, + info: web::Path<(String,)>, + edit_data: web::Json, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let string = info.into_inner().0; + let edit_data = edit_data.into_inner(); + // Must be logged in to edit + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await?; + + // Confirm this is our project, then if so, edit + let id = database::models::ClientProfileId(parse_base62(&string)? as i64); + let mut transaction = pool.begin().await?; + let profile_data = + database::models::client_profile_item::ClientProfile::get(id, &mut *transaction, &redis) + .await?; + + if let Some(data) = profile_data { + if data.inner.owner_id == user_option.1.id.into() { + // Edit the profile + if let Some(name) = edit_data.name { + sqlx::query!( + "UPDATE shared_profiles SET name = $1 WHERE id = $2", + name, + data.inner.id.0 + ) + .execute(&mut *transaction) + .await?; + } + if let Some(loader) = edit_data.loader { + let loader_id = database::models::loader_fields::Loader::get_id( + &loader, + &mut *transaction, + &redis, + ) + .await? + .ok_or_else(|| ApiError::InvalidInput("Invalid loader".to_string()))?; + + sqlx::query!( + "UPDATE shared_profiles SET loader_id = $1 WHERE id = $2", + loader_id.0, + data.inner.id.0 + ) + .execute(&mut *transaction) + .await?; + } + if let Some(versions) = edit_data.versions { + let version_ids = versions.into_iter().map(|x| x.into()).collect::>(); + let versions = + version_item::Version::get_many(&version_ids, &mut *transaction, &redis) + .await? 
+ .into_iter() + .map(|x| x.inner) + .collect::>(); + + // Filters versions authorized to see + let versions = filter_visible_version_ids( + versions.iter().collect_vec(), + &Some(user_option.1.clone()), + &pool, + &redis, + ) + .await + .map_err(|_| { + ApiError::InvalidInput("Could not fetch submitted version ids".to_string()) + })?; + + // Remove all shared profile versions of this profile + sqlx::query!( + "DELETE FROM shared_profiles_versions WHERE shared_profile_id = $1", + data.inner.id.0 + ) + .execute(&mut *transaction) + .await?; + + // Insert all new shared profile mods + for v in versions { + sqlx::query!( + "INSERT INTO shared_profiles_versions (shared_profile_id, version_id) VALUES ($1, $2)", + data.inner.id.0, + v.0 + ) + .execute(&mut *transaction) + .await?; + } + + // Set updated + sqlx::query!( + " + UPDATE shared_profiles + SET updated = NOW() + WHERE id = $1 + ", + data.inner.id.0, + ) + .execute(&mut *transaction) + .await?; + } + if let Some(remove_users) = edit_data.remove_users { + for user in remove_users { + // Remove user from list + sqlx::query!( + "DELETE FROM shared_profiles_users WHERE shared_profile_id = $1 AND user_id = $2", + data.inner.id.0 as i64, + user.0 as i64 + ) + .execute(&mut *transaction) + .await?; + } + } + + if let Some(remove_links) = edit_data.remove_links { + for link in remove_links { + // Remove link from list + sqlx::query!( + "DELETE FROM shared_profiles_links WHERE shared_profile_id = $1 AND id = $2", + data.inner.id.0 as i64, + link.0 as i64 + ) + .execute(&mut *transaction) + .await?; + } + } + + // Edit the metadata fields + if edit_data.loader_version.is_some() || edit_data.game_version.is_some() { + let mut metadata = data.inner.metadata.clone(); + + match &mut metadata { + client_profile_item::ClientProfileMetadata::Minecraft { + loader_version, + game_version_id, + game_version, + } => { + if let Some(new_loader_version) = edit_data.loader_version { + *loader_version = new_loader_version; + } + + if let Some(new_game_version) = edit_data.game_version { + let new_game_id = + database::models::legacy_loader_fields::MinecraftGameVersion::list( + None, None, &**pool, &redis, + ) + .await? + .into_iter() + .find(|x| x.version == new_game_version) + .ok_or_else(|| { + ApiError::InvalidInput( + "Invalid Client game version".to_string(), + ) + })? 
+ .id; + + *game_version_id = new_game_id; + *game_version = new_game_version; + } + } + client_profile_item::ClientProfileMetadata::Unknown => { + return Err(ApiError::InvalidInput( + "Cannot edit metadata of unknown profile".to_string(), + )); + } + } + + sqlx::query!( + "UPDATE shared_profiles SET metadata = $1 WHERE id = $2", + serde_json::to_value(metadata)?, + data.inner.id.0 + ) + .execute(&mut *transaction) + .await?; + } + + transaction.commit().await?; + client_profile_item::ClientProfile::clear_cache(data.inner.id, &redis).await?; + return Ok(HttpResponse::NoContent().finish()); + } else { + return Err(ApiError::CustomAuthentication( + "You are not the owner of this profile".to_string(), + )); + } + } + Err(ApiError::NotFound) +} + +// Delete a client profile +pub async fn profile_delete( + req: HttpRequest, + info: web::Path<(String,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, + file_host: web::Data>, +) -> Result { + let string = info.into_inner().0; + + // Must be logged in to delete + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await?; + + // Confirm this is our project, then if so, delete + let id = database::models::ClientProfileId(parse_base62(&string)? as i64); + let profile_data = + database::models::client_profile_item::ClientProfile::get(id, &**pool, &redis).await?; + if let Some(data) = profile_data { + if data.inner.owner_id == user_option.1.id.into() { + let mut transaction = pool.begin().await?; + + let deleted_hashes = database::models::client_profile_item::ClientProfile::remove( + data.inner.id, + &mut transaction, + &redis, + ) + .await?; + transaction.commit().await?; + client_profile_item::ClientProfile::clear_cache(data.inner.id, &redis).await?; + + // Delete the files from the CDN if they are no longer used by any profile + delete_unused_files_from_host(deleted_hashes, &pool, &file_host).await?; + + return Ok(HttpResponse::NoContent().finish()); + } else if data.inner.users.contains(&user_option.1.id.into()) { + // We know it exists, but still can't delete it + return Err(ApiError::CustomAuthentication( + "You are not the owner of this profile".to_string(), + )); + } + } + + Err(ApiError::NotFound) +} + +// Share a client profile with a friend. +// This generates a link struct, including the field 'url' +// that can be shared with friends to generate a token a limited number of times. +// TODO: This link should not be an API link, but a modrinth:// link that is translatable to an API link by the launcher +pub async fn profile_share( + req: HttpRequest, + info: web::Path<(String,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let string = info.into_inner().0; + + // Must be logged in to share + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await?; + + // Confirm this is our project, then if so, share + let id = database::models::ClientProfileId(parse_base62(&string)? 
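+    // NOTE: the link id generated below doubles as the share-link identifier
+    // (see the shared_profiles_links.id comment in the migration); the random
+    // `_identifier` string is currently unused.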
as i64); + let profile_data = + database::models::client_profile_item::ClientProfile::get(id, &**pool, &redis).await?; + + if let Some(data) = profile_data { + if data.inner.owner_id == user_option.1.id.into() { + // Generate a share link identifier + let _identifier = ChaCha20Rng::from_entropy() + .sample_iter(&Alphanumeric) + .take(8) + .map(char::from) + .collect::(); + + // Generate a new share link id + let mut transaction = pool.begin().await?; + let profile_link_id = generate_client_profile_link_id(&mut transaction).await?; + + let link = database::models::client_profile_item::ClientProfileLink { + id: profile_link_id, + shared_profile_id: data.inner.id, + created: Utc::now(), + expires: Utc::now() + chrono::Duration::days(7), + }; + link.insert(&mut transaction).await?; + transaction.commit().await?; + client_profile_item::ClientProfile::clear_cache(data.inner.id, &redis).await?; + return Ok(HttpResponse::Ok().json(ClientProfileShareLink::from(link))); + } + } + Err(ApiError::NotFound) +} + +// Get a profile's basic information by its share link id +pub async fn profile_get_share_link( + req: HttpRequest, + info: web::Path<(ClientProfileLinkId,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let url_identifier = info.into_inner().0; + + // Must be logged + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + None, // No scopes required to read your own links + ) + .await? + .1; + + // Fetch the profile information of the desired client profile + let link_data = database::models::client_profile_item::ClientProfileLink::get( + url_identifier.into(), + &**pool, + ) + .await? + .ok_or_else(|| ApiError::NotFound)?; + + let data = database::models::client_profile_item::ClientProfile::get( + link_data.shared_profile_id, + &**pool, + &redis, + ) + .await? + .ok_or_else(|| ApiError::NotFound)?; + + // Return the profile information + Ok(HttpResponse::Ok().json(ClientProfile::from(data, Some(user_option.id.into())))) +} + +// Accept a share link to a profile +// This adds the user to the team +// TODO: With above change, this is the API link that is translated from a modrinth:// link by the launcher, which would then download it +pub async fn accept_share_link( + req: HttpRequest, + info: web::Path<(ClientProfileLinkId,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let url_identifier = info.into_inner().0; + + // Must be logged in to accept + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await?; + + // Fetch the profile information of the desired client profile + let link_data = database::models::client_profile_item::ClientProfileLink::get( + url_identifier.into(), + &**pool, + ) + .await? + .ok_or_else(|| ApiError::NotFound)?; + + let data = database::models::client_profile_item::ClientProfile::get( + link_data.shared_profile_id, + &**pool, + &redis, + ) + .await? 
+ .ok_or_else(|| ApiError::NotFound)?; + + // Confirm this is not our profile + if data.inner.owner_id == user_option.1.id.into() { + return Err(ApiError::InvalidInput( + "You cannot accept your own share link".to_string(), + )); + } + + // Confirm we are not already on the team + if data + .inner + .users + .iter() + .any(|x| *x == user_option.1.id.into()) + { + return Err(ApiError::InvalidInput( + "You are already on this profile's team".to_string(), + )); + } + + // Add the user to the team + sqlx::query!( + "INSERT INTO shared_profiles_users (shared_profile_id, user_id) VALUES ($1, $2)", + data.inner.id.0 as i64, + user_option.1.id.0 as i64 + ) + .execute(&**pool) + .await?; + client_profile_item::ClientProfile::clear_cache(data.inner.id, &redis).await?; + + Ok(HttpResponse::NoContent().finish()) +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct ProfileDownload { + // Version ids for modrinth-hosted versions + pub version_ids: Vec, + + // The override cdns for the profile: + // (cdn url, install path) + pub override_cdns: Vec, +} + +#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)] +pub struct ProfileOverride { + pub url: String, + pub install_path: PathBuf, + pub hashes: HashMap, +} + +// Download a client profile (gets files) +// This one can use profile id, so fields can be accessed if no share link is available +// Only the owner of the profile or an invited user can download +pub async fn profile_files( + req: HttpRequest, + info: web::Path<(ClientProfileId,)>, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let profile_id = info.into_inner().0; + + // Must be logged in to download + let user_option = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_DOWNLOAD]), + ) + .await?; + + // Fetch the profile information of the desired client profile + let Some(profile) = database::models::client_profile_item::ClientProfile::get( + profile_id.into(), + &**pool, + &redis, + ) + .await? + else { + return Err(ApiError::NotFound); + }; + + // Check if this user is on the profile user list + if !profile.inner.users.contains(&user_option.1.id.into()) { + return Err(ApiError::CustomAuthentication( + "You are not on this profile's team".to_string(), + )); + } + + let override_cdns = profile + .override_files + .into_iter() + .map(|x| ProfileOverride { + url: x.url, + install_path: x.install_path, + hashes: x.hashes, + }) + .collect::>(); + + Ok(HttpResponse::Ok().json(ProfileDownload { + version_ids: profile.inner.versions.iter().map(|x| (*x).into()).collect(), + override_cdns, + })) +} + +#[derive(Deserialize)] +pub struct TokenUrl { + pub url: String, // TODO: Could take a vec instead for mass checking- revisit after cloudflare worker is done +} + +// Used by cloudflare to check headers and permit CDN downloads for a pack +// Checks headers for 'authorization: xxyyzz' where xxyyzz is a valid user authorization token +// that allows for downloading of url 'url' +pub async fn profile_token_check( + req: HttpRequest, + file_url: web::Query, + pool: web::Data, + redis: web::Data, + session_queue: web::Data, +) -> Result { + let file_url = file_url.into_inner().url; + + let user = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_DOWNLOAD]), + ) + .await? 
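+    // Sketch of the expected call (the path prefix depends on where this scope is
+    // mounted; the header is the one get_user_from_headers reads):
+    //   GET .../client/check_token?url=<cdn file url>
+    //   authorization: <token with the CLIENT_PROFILE_DOWNLOAD scope>
+    // Responds 200 OK if the url belongs to one of the user's profiles; otherwise
+    // an authentication error is returned.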
+ .1; + + // Get all profiles for the user + let profile_ids = database::models::client_profile_item::ClientProfile::get_ids_for_user( + user.id.into(), + &**pool, + ) + .await?; + + let profiles = database::models::client_profile_item::ClientProfile::get_many( + &profile_ids, + &**pool, + &redis, + ) + .await?; + + let all_allowed_urls = profiles + .into_iter() + .flat_map(|x| x.override_files.into_iter().map(|x| x.url)) + .collect::>(); + + // Check the token is valid for the requested file + let valid = all_allowed_urls.iter().any(|x| x == &file_url); + + if !valid { + Err(ApiError::Authentication( + AuthenticationError::InvalidAuthMethod, + )) + } else { + Ok(HttpResponse::Ok().finish()) + } +} + +#[derive(Serialize, Deserialize)] +pub struct Extension { + pub ext: String, +} + +#[allow(clippy::too_many_arguments)] +pub async fn profile_icon_edit( + web::Query(ext): web::Query, + req: HttpRequest, + info: web::Path<(ClientProfileId,)>, + pool: web::Data, + redis: web::Data, + file_host: web::Data>, + mut payload: web::Payload, + session_queue: web::Data, +) -> Result { + if let Some(content_type) = crate::util::ext::get_image_content_type(&ext.ext) { + let cdn_url = dotenvy::var("CDN_URL")?; + let user = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await? + .1; + let id = info.into_inner().0; + + let profile_item = + database::models::client_profile_item::ClientProfile::get(id.into(), &**pool, &redis) + .await? + .ok_or_else(|| { + ApiError::InvalidInput("The specified profile does not exist!".to_string()) + })?; + + if !user.role.is_mod() && profile_item.inner.owner_id != user.id.into() { + return Err(ApiError::CustomAuthentication( + "You don't have permission to edit this profile's icon.".to_string(), + )); + } + + if let Some(icon) = profile_item.inner.icon_url { + let name = icon.split(&format!("{cdn_url}/")).nth(1); + + if let Some(icon_path) = name { + file_host.delete_file_version("", icon_path).await?; + } + } + + let bytes = + read_from_payload(&mut payload, 262144, "Icons must be smaller than 256KiB").await?; + + let color = crate::util::img::get_color_from_img(&bytes)?; + + let hash = format!("{:x}", sha2::Sha512::digest(&bytes)); + let id: ClientProfileId = profile_item.inner.id.into(); + let upload_data = file_host + .upload_file( + content_type, + &format!("data/{}/{}.{}", id, hash, ext.ext), + bytes.freeze(), + ) + .await?; + + let mut transaction = pool.begin().await?; + + sqlx::query!( + " + UPDATE shared_profiles + SET icon_url = $1, color = $2 + WHERE (id = $3) + ", + format!("{}/{}", cdn_url, upload_data.file_name), + color.map(|x| x as i32), + profile_item.inner.id as DBClientProfileId, + ) + .execute(&mut *transaction) + .await?; + + transaction.commit().await?; + database::models::client_profile_item::ClientProfile::clear_cache( + profile_item.inner.id, + &redis, + ) + .await?; + + Ok(HttpResponse::NoContent().body("")) + } else { + Err(ApiError::InvalidInput(format!( + "Invalid format for project icon: {}", + ext.ext + ))) + } +} + +pub async fn delete_profile_icon( + req: HttpRequest, + info: web::Path<(ClientProfileId,)>, + pool: web::Data, + redis: web::Data, + file_host: web::Data>, + session_queue: web::Data, +) -> Result { + let user = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await? 
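+    // Deleting an icon is still a profile write, so the same
+    // CLIENT_PROFILE_WRITE scope as the icon upload route is required.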
+ .1; + let id = info.into_inner().0; + + let profile_item = + database::models::client_profile_item::ClientProfile::get(id.into(), &**pool, &redis) + .await? + .ok_or_else(|| { + ApiError::InvalidInput("The specified profile does not exist!".to_string()) + })?; + + if !user.role.is_mod() && profile_item.inner.owner_id != user.id.into() { + return Err(ApiError::CustomAuthentication( + "You don't have permission to edit this profile's icon.".to_string(), + )); + } + + let cdn_url = dotenvy::var("CDN_URL")?; + if let Some(icon) = profile_item.inner.icon_url { + let name = icon.split(&format!("{cdn_url}/")).nth(1); + + if let Some(icon_path) = name { + file_host.delete_file_version("", icon_path).await?; + } + } + + let mut transaction = pool.begin().await?; + + sqlx::query!( + " + UPDATE shared_profiles + SET icon_url = NULL, color = NULL + WHERE (id = $1) + ", + profile_item.inner.id as DBClientProfileId, + ) + .execute(&mut *transaction) + .await?; + + transaction.commit().await?; + + database::models::client_profile_item::ClientProfile::clear_cache( + profile_item.inner.id, + &redis, + ) + .await?; + + Ok(HttpResponse::NoContent().body("")) +} + +// Add a new override mod to a client profile, by uploading it to the CDN +// Accepts a multipart field +// the first part is called `data` and contains a json array of objects with the following fields: +// file_name: String +// install_path: String +// The rest of the parts are files, and their install paths are matched to the install paths in the data field +#[derive(Serialize, Deserialize)] +pub struct MultipartFile { + pub file_name: String, + pub install_path: String, +} + +#[allow(clippy::too_many_arguments)] +pub async fn client_profile_add_override( + req: HttpRequest, + client_id: web::Path, + pool: web::Data, + redis: web::Data, + file_host: web::Data>, + mut payload: Multipart, + session_queue: web::Data, +) -> Result { + let client_id = client_id.into_inner(); + let user = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await? + .1; + + // Check if this is our profile + let profile_item = database::models::client_profile_item::ClientProfile::get( + client_id.into(), + &**pool, + &redis, + ) + .await? + .ok_or_else(|| { + CreateError::InvalidInput("The specified profile does not exist!".to_string()) + })?; + + if !user.role.is_mod() && profile_item.inner.owner_id != user.id.into() { + return Err(CreateError::CustomAuthenticationError( + "You don't have permission to add overrides.".to_string(), + )); + } + + let mut error = None; + let mut uploaded_files = Vec::new(); + + let files: Vec = { + // First, get the data field + let mut field = payload.next().await.ok_or_else(|| { + CreateError::InvalidInput(String::from("Upload must have a data field")) + })??; + + let content_disposition = field.content_disposition().clone(); + // Allow any content type + let name = content_disposition + .get_name() + .ok_or_else(|| CreateError::InvalidInput(String::from("Upload must have a name")))?; + + if name == "data" { + let mut d: Vec = Vec::new(); + while let Some(chunk) = field.next().await { + d.extend_from_slice(&chunk.map_err(CreateError::MultipartError)?); + } + serde_json::from_slice(&d)? 
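+            // e.g. [{"file_name": "test.jar", "install_path": "mods/test.jar"}] -
+            // each later file part is matched to an entry here by its multipart field name.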
+ } else { + return Err(CreateError::InvalidInput(String::from( + "`data` field must come before file fields", + ))); + } + }; + + let mut client_profile_files = Vec::new(); + let mut transaction = pool.begin().await?; + + while let Some(item) = payload.next().await { + let mut field: Field = item?; + if error.is_some() { + continue; + } + let result = async { + let content_disposition = field.content_disposition().clone(); + let cdn_url = dotenvy::var("CDN_URL")?; + + // Upload file to CDN and get hash + upload_file( + &mut field, + &files, + &***file_host, + &content_disposition, + &cdn_url, + None, + &mut client_profile_files, + &mut uploaded_files, + &mut transaction, + ) + .await?; + Ok(()) + } + .await; + + if result.is_err() { + error = result.err(); + } + } + + if let Some(error) = error { + return Err(error); + } + + for file in client_profile_files { + file.insert(profile_item.inner.id, &mut transaction).await?; + } + + // Set updated + sqlx::query!( + " + UPDATE shared_profiles + SET updated = NOW() + WHERE id = $1 + ", + profile_item.inner.id.0, + ) + .execute(&mut *transaction) + .await?; + + transaction.commit().await?; + + database::models::client_profile_item::ClientProfile::clear_cache( + profile_item.inner.id, + &redis, + ) + .await?; + + Ok(HttpResponse::NoContent().body("")) +} + +#[derive(Serialize, Deserialize)] +pub struct RemoveOverrides { + // Either will work, or some combination, to identify the overrides to remove + pub install_paths: Option>, + pub algorithm: Option, + pub hashes: Option>, +} + +pub async fn client_profile_remove_overrides( + req: HttpRequest, + client_id: web::Path, + pool: web::Data, + data: web::Json, + redis: web::Data, + file_host: web::Data>, + session_queue: web::Data, +) -> Result { + let client_id = client_id.into_inner(); + let user = get_user_from_headers( + &req, + &**pool, + &redis, + &session_queue, + Some(&[Scopes::CLIENT_PROFILE_WRITE]), + ) + .await? + .1; + + // Check if this is our profile + let profile_item = database::models::client_profile_item::ClientProfile::get( + client_id.into(), + &**pool, + &redis, + ) + .await? 
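+    // Unlike the share-link routes, a missing profile here surfaces as an
+    // input error (matching the add-override route) rather than a 404.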
+ .ok_or_else(|| ApiError::InvalidInput("The specified profile does not exist!".to_string()))?; + + if !user.role.is_mod() && profile_item.inner.owner_id != user.id.into() { + return Err(ApiError::CustomAuthentication( + "You don't have permission to remove overrides.".to_string(), + )); + } + + let delete_hashes = data.hashes.clone().unwrap_or_default(); + let algorithm = data + .algorithm + .clone() + .unwrap_or_else(|| default_algorithm_from_hashes(&delete_hashes)); + let delete_install_paths = data.install_paths.clone().unwrap_or_default(); + + // TODO: ensure tested well + let overrides = profile_item + .override_files + .into_iter() + .map(|x| (x.hashes, x.install_path)) + .map(|(hashes, install_paths)| (hashes.get(&algorithm).cloned(), install_paths)) + .filter(|(hash, path)| { + hash.as_ref() + .map(|h| delete_hashes.contains(h)) + .unwrap_or(false) + || delete_install_paths.contains(path) + }) + .collect::>(); + + let delete_hashes = overrides + .iter() + .filter_map(|x| x.0.as_ref().map(|x| x.as_bytes().to_vec())) + .collect::>(); + let delete_install_paths = overrides + .iter() + .map(|x| x.1.to_string_lossy().to_string()) + .collect::>(); + + let mut transaction = pool.begin().await?; + + let files_to_delete = sqlx::query!( + " + SELECT spf.file_id + FROM shared_profiles_files spf + INNER JOIN files f ON f.id = spf.file_id + INNER JOIN hashes h ON h.file_id = f.id + WHERE (shared_profile_id = $1 AND (h.hash = ANY($2) OR install_path = ANY($3::text[]))) + ", + profile_item.inner.id.0, + &delete_hashes[..], + &delete_install_paths[..], + ) + .fetch_all(&mut *transaction) + .await? + .into_iter() + .map(|x| FileId(x.file_id)) + .collect::>(); + + sqlx::query!( + " + DELETE FROM shared_profiles_files + WHERE file_id = ANY($1::bigint[]) AND shared_profile_id = $2 + ", + &files_to_delete.iter().map(|x| x.0).collect::>()[..], + profile_item.inner.id.0, + ) + .execute(&mut *transaction) + .await?; + + // Check if any versions_files or shared_profiles_files still reference the file- these files should not be deleted + // Delete the files that are not referenced + let deleted_hashes = + file_item::remove_unreferenced_files(files_to_delete, &mut transaction).await?; + + // Set updated + sqlx::query!( + " + UPDATE shared_profiles + SET updated = NOW() + WHERE id = $1 + ", + profile_item.inner.id.0, + ) + .execute(&mut *transaction) + .await?; + + transaction.commit().await?; + + database::models::client_profile_item::ClientProfile::clear_cache( + profile_item.inner.id, + &redis, + ) + .await?; + + // Delete the files from the CDN if they are no longer used by any profile + delete_unused_files_from_host(deleted_hashes, &pool, &file_host).await?; + + Ok(HttpResponse::NoContent().body("")) +} + +// For a list of deleted hashes, delete the files from the CDN if they are no longer used by any profile +async fn delete_unused_files_from_host( + deleted_hashes: Vec, + pool: &PgPool, + file_host: &Arc, +) -> Result<(), ApiError> { + // Confirm hashes no longer exist in any profile (for sureness) + let deleted_hashes_bytes = deleted_hashes + .iter() + .map(|x| x.as_bytes().to_vec()) + .collect::>(); + let still_existing_hashes = sqlx::query!( + " + SELECT encode(hash, 'escape') hash FROM hashes + WHERE hash = ANY($1) + ", + &deleted_hashes_bytes + ) + .fetch_all(pool) + .await? 
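+    // encode(hash, 'escape') returns the stored bytea as text, so it can be
+    // compared directly against the deleted hash strings below.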
+ .into_iter() + .filter_map(|x| x.hash) + .collect::>(); + + // We want to delete files from the server that are no longer used by any profile + let hashes_to_delete = deleted_hashes + .into_iter() + .filter(|x| !still_existing_hashes.contains(x)) + .collect::>(); + let hashes_to_delete = hashes_to_delete + .iter() + .map(|x| x.as_str()) + .collect::>(); + + for hash in hashes_to_delete { + file_host + .delete_file_version("", &format!("custom_files/{}", hash)) + .await?; + } + + Ok(()) +} + +// This function is used for adding an override file to a pack +#[allow(clippy::too_many_arguments)] +pub async fn upload_file( + field: &mut Field, + multipart_files: &[MultipartFile], + file_host: &dyn FileHost, + content_disposition: &actix_web::http::header::ContentDisposition, + cdn_url: &str, + file_type: Option, + client_profile_files: &mut Vec, + uploaded_files: &mut Vec, + transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>, +) -> Result<(), CreateError> { + let (file_name, file_extension) = get_name_ext(content_disposition)?; + + if file_name.contains('/') { + return Err(CreateError::InvalidInput( + "File names must not contain slashes!".to_string(), + )); + } + + let content_type = crate::util::ext::project_file_type(file_extension) + .ok_or_else(|| CreateError::InvalidFileType(file_extension.to_string()))?; + + let data = read_from_field( + field, 500 * (1 << 20), + "Project file exceeds the maximum of 500MiB. Contact a moderator or admin to request permission to upload larger files." + ).await?; + + let name = content_disposition + .get_name() + .ok_or_else(|| CreateError::InvalidInput(String::from("Upload must have a name")))?; + + let install_path = multipart_files + .iter() + .find(|x| x.file_name == name) + .ok_or_else(|| { + CreateError::InvalidInput(format!( + "No matching file name in `data` for file '{}'", + name + )) + })? + .install_path + .clone(); + + let hash = format!("{:x}", sha2::Sha512::digest(&data)); + + // First, check if this file already exists with an approved project's version + let existing_collision = sqlx::query!( + " + SELECT m.slug FROM hashes h + INNER JOIN files f ON f.id = h.file_id + INNER JOIN versions_files vf on vf.file_id = f.id + INNER JOIN versions v ON v.id = vf.version_id + INNER JOIN mods m ON m.id = v.mod_id AND m.status = ANY($3) + WHERE h.algorithm = $2 AND h.hash = $1 + ", + hash.as_bytes(), + "sha512", + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| x.is_approved()) + .map(|x| x.to_string()) + .collect::>(), + ) + .fetch_optional(&mut **transaction) + .await? + .and_then(|x| x.slug); + + if let Some(existing_collision) = existing_collision { + return Err(CreateError::InvalidInput(format!( + "Override file at path '{}' already exists in Modrinth for mod '{}'", + install_path, existing_collision + ))); + } + + // Allow uploading the same file multiple times, if none are for approved versions, but + // we'll connect them to the same file in the database/CDN + let existing_file = sqlx::query!( + " + SELECT f.id + FROM hashes h + INNER JOIN files f ON f.id = h.file_id + WHERE h.algorithm = $2 AND h.hash = $1 + ", + hash.as_bytes(), + "sha512", + ) + .fetch_optional(&mut **transaction) + .await? 
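+    // Reuse the existing files row (and CDN object) for a byte-identical
+    // upload instead of inserting a duplicate.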
+ .map(|x| FileId(x.id)); + + let data = data.freeze(); + let file_path = format!("custom_files/{hash}"); + + let upload_data = file_host + .upload_file(content_type, &file_path, data) + .await?; + + let sha1_bytes = upload_data.content_sha1.into_bytes(); + let sha512_bytes = upload_data.content_sha512.into_bytes(); + + client_profile_files.push(ClientProfileFileBuilder { + filename: file_name.to_string(), + url: format!("{}/{}", cdn_url, upload_data.file_name), + hashes: vec![ + file_item::HashBuilder { + algorithm: "sha1".to_string(), + // This is an invalid cast - the database expects the hash's + // bytes, but this is the string version. + hash: sha1_bytes, + }, + file_item::HashBuilder { + algorithm: "sha512".to_string(), + // This is an invalid cast - the database expects the hash's + // bytes, but this is the string version. + hash: sha512_bytes, + }, + ], + install_path: install_path.into(), + size: upload_data.content_length, + existing_file, + file_type, + }); + + uploaded_files.push(UploadedFile { + file_id: upload_data.file_id, + file_name: file_path, + }); + + Ok(()) +} diff --git a/src/routes/internal/mod.rs b/src/routes/internal/mod.rs index 81ac4c9b..77aec77f 100644 --- a/src/routes/internal/mod.rs +++ b/src/routes/internal/mod.rs @@ -1,4 +1,5 @@ pub(crate) mod admin; +pub mod client; pub mod flows; pub mod pats; pub mod session; @@ -12,6 +13,7 @@ pub fn config(cfg: &mut actix_web::web::ServiceConfig) { actix_web::web::scope("_internal") .wrap(default_cors()) .configure(admin::config) + .configure(client::profiles::config) // TODO: write tests that catch these .configure(oauth_clients::config) .configure(session::config) diff --git a/src/routes/maven.rs b/src/routes/maven.rs index 37cfe17d..f1a83409 100644 --- a/src/routes/maven.rs +++ b/src/routes/maven.rs @@ -2,7 +2,7 @@ use crate::auth::checks::{is_visible_project, is_visible_version}; use crate::database::models::legacy_loader_fields::MinecraftGameVersion; use crate::database::models::loader_fields::Loader; use crate::database::models::project_item::QueryProject; -use crate::database::models::version_item::{QueryFile, QueryVersion}; +use crate::database::models::version_item::{QueryVersion, QueryVersionFile}; use crate::database::redis::RedisPool; use crate::models::pats::Scopes; use crate::models::projects::{ProjectId, VersionId}; @@ -230,7 +230,7 @@ fn find_file<'a>( vcoords: &str, version: &'a QueryVersion, file: &str, -) -> Option<&'a QueryFile> { +) -> Option<&'a QueryVersionFile> { if let Some(selected_file) = version.files.iter().find(|x| x.filename == file) { return Some(selected_file); } diff --git a/src/routes/mod.rs b/src/routes/mod.rs index 18581eaa..3c250144 100644 --- a/src/routes/mod.rs +++ b/src/routes/mod.rs @@ -21,6 +21,7 @@ mod not_found; mod updates; pub use self::not_found::not_found; +use self::v3::project_creation::CreateError; pub fn root_config(cfg: &mut web::ServiceConfig) { cfg.service( @@ -187,3 +188,21 @@ impl actix_web::ResponseError for ApiError { }) } } + +// A simple trait that allows helper functions to return ApiError or CreateError, +// while keeping the ability to generate InvalidInput(String) +pub trait CommonError { + fn invalid_input(s: String) -> Self; +} + +impl CommonError for ApiError { + fn invalid_input(s: String) -> Self { + ApiError::InvalidInput(s) + } +} + +impl CommonError for CreateError { + fn invalid_input(s: String) -> Self { + CreateError::InvalidInput(s) + } +} diff --git a/src/routes/v3/projects.rs b/src/routes/v3/projects.rs index 4ba06ea4..bb937914 100644 --- 
a/src/routes/v3/projects.rs +++ b/src/routes/v3/projects.rs @@ -6,7 +6,7 @@ use crate::auth::{filter_visible_projects, get_user_from_headers}; use crate::database::models::notification_item::NotificationBuilder; use crate::database::models::project_item::{GalleryItem, ModCategory}; use crate::database::models::thread_item::ThreadMessageBuilder; -use crate::database::models::{ids as db_ids, image_item, TeamMember}; +use crate::database::models::{file_item, ids as db_ids, image_item, TeamMember}; use crate::database::redis::RedisPool; use crate::database::{self, models as db_models}; use crate::file_hosting::FileHost; @@ -375,6 +375,28 @@ pub async fn project_edit( ) .execute(&mut *transaction) .await?; + + // On approval, all versions become unique 'owners' of their files + // Get them directly rather than use project_item.versions (which is missing hidden versions) + // TODO: this can be simplified when .versions field on the Project struct is revised + let version_ids = sqlx::query!( + " + SELECT id FROM versions + WHERE mod_id = $1 + ", + id as db_ids::ProjectId, + ) + .fetch_many(&mut *transaction) + .try_filter_map(|e| async { Ok(e.right().map(|c| db_ids::VersionId(c.id))) }) + .try_collect::>() + .await?; + + file_item::convert_hash_collisions_to_versions::( + &version_ids, + &mut transaction, + &redis, + ) + .await?; } if status.is_searchable() && !project_item.inner.webhook_sent { if let Ok(webhook_url) = dotenvy::var("PUBLIC_DISCORD_WEBHOOK") { diff --git a/src/routes/v3/statistics.rs b/src/routes/v3/statistics.rs index c6c24e1a..ce899223 100644 --- a/src/routes/v3/statistics.rs +++ b/src/routes/v3/statistics.rs @@ -66,7 +66,8 @@ pub async fn get_stats(pool: web::Data) -> Result( + &[version_id], + &mut *transaction, + redis, + ) + .await?; + } + models::Project::clear_cache(project_id, None, Some(true), redis).await?; Ok(HttpResponse::Ok().json(response)) @@ -734,6 +749,21 @@ async fn upload_file_to_version_inner( } } + // On upload to approved project, the version become unique 'owner' of the file + let project = models::Project::get_id(version.inner.project_id, &mut **transaction, &redis) + .await? + .ok_or_else(|| { + CreateError::InvalidInput("An invalid project id was supplied".to_string()) + })?; + if project.inner.status.is_approved() { + file_item::convert_hash_collisions_to_versions::( + &[version_id], + &mut *transaction, + &redis, + ) + .await?; + } + // Clear version cache models::Version::clear_cache(&version, &redis).await?; @@ -779,28 +809,50 @@ pub async fn upload_file( ).await?; let hash = sha1::Sha1::from(&data).hexdigest(); - let exists = sqlx::query!( + // First, check if this file already exists with an approved project's version + let existing_collision = sqlx::query!( " - SELECT EXISTS(SELECT 1 FROM hashes h + SELECT m.slug FROM hashes h INNER JOIN files f ON f.id = h.file_id - INNER JOIN versions v ON v.id = f.version_id - WHERE h.algorithm = $2 AND h.hash = $1 AND v.mod_id != $3) + INNER JOIN versions_files vf on vf.file_id = f.id + INNER JOIN versions v ON v.id = vf.version_id + INNER JOIN mods m ON m.id = v.mod_id AND m.status = ANY($3) + WHERE h.algorithm = $2 AND h.hash = $1 ", hash.as_bytes(), "sha1", - project_id.0 as i64 + &*crate::models::projects::ProjectStatus::iterator() + .filter(|x| x.is_approved()) + .map(|x| x.to_string()) + .collect::>(), ) - .fetch_one(&mut **transaction) + .fetch_optional(&mut **transaction) .await? 
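+    // The old query only checked EXISTS; fetching the slug instead lets the
+    // collision error below name the offending project.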
- .exists - .unwrap_or(false); + .and_then(|x| x.slug); - if exists { - return Err(CreateError::InvalidInput( - "Duplicate files are not allowed to be uploaded to Modrinth!".to_string(), - )); + if let Some(existing_collision) = existing_collision { + return Err(CreateError::InvalidInput(format!( + "File '{}' already exists in Modrinth for mod '{}' by hash.", + file_name, existing_collision + ))); } + // Allow uploading the same file multiple times, if none are for approved versions, but + // we'll connect them to the same file in the database/CDN + let existing_file = sqlx::query!( + " + SELECT f.id + FROM hashes h + INNER JOIN files f ON f.id = h.file_id + WHERE h.algorithm = $2 AND h.hash = $1 + ", + hash.as_bytes(), + "sha1", + ) + .fetch_optional(&mut **transaction) + .await? + .map(|x| FileId(x.id)); + let validation_result = validate_file( data.clone().into(), file_extension.to_string(), @@ -829,7 +881,8 @@ pub async fn upload_file( " SELECT v.id version_id, v.mod_id project_id, h.hash hash FROM hashes h INNER JOIN files f on h.file_id = f.id - INNER JOIN versions v on f.version_id = v.id + INNER JOIN versions_files vf on vf.file_id = f.id + INNER JOIN versions v on v.id = vf.version_id WHERE h.algorithm = 'sha1' AND h.hash = ANY($1) ", &*hashes @@ -914,14 +967,15 @@ pub async fn upload_file( version_files.push(VersionFileBuilder { filename: file_name.to_string(), url: format!("{cdn_url}/{file_path_encode}"), + existing_file, hashes: vec![ - models::version_item::HashBuilder { + models::file_item::HashBuilder { algorithm: "sha1".to_string(), // This is an invalid cast - the database expects the hash's // bytes, but this is the string version. hash: sha1_bytes, }, - models::version_item::HashBuilder { + models::file_item::HashBuilder { algorithm: "sha512".to_string(), // This is an invalid cast - the database expects the hash's // bytes, but this is the string version. 
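Note on the repeated "invalid cast" comments above: `content_sha1` and `content_sha512` are hex strings, so `into_bytes()` stores the ASCII hex characters rather than the raw digest. The queries in this PR are self-consistent (they compare against `hash.as_bytes()` of a hex string), but the comments suggest raw bytes were intended. A minimal sketch of the difference, assuming the `hex` crate were available (it is not necessarily a dependency here):

    // Illustrative only: ASCII-hex bytes vs. raw digest bytes.
    fn hex_vs_raw(hash_hex: &str) {
        // What the builders above store: the hex string's own bytes,
        // twice the digest length ("a3" -> [0x61, 0x33]).
        let ascii_bytes: Vec<u8> = hash_hex.as_bytes().to_vec();

        // What a bytea column holding raw digests would expect
        // (hypothetical here; assumes `hex::decode` for the conversion).
        let raw_bytes: Vec<u8> = hex::decode(hash_hex).expect("valid hex");

        assert_eq!(ascii_bytes.len(), 2 * raw_bytes.len());
    }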
diff --git a/src/routes/v3/version_file.rs b/src/routes/v3/version_file.rs index 8400cfbd..e746d638 100644 --- a/src/routes/v3/version_file.rs +++ b/src/routes/v3/version_file.rs @@ -1,6 +1,7 @@ use super::ApiError; use crate::auth::checks::{filter_visible_versions, is_visible_version}; use crate::auth::{filter_visible_projects, get_user_from_headers}; +use crate::database::models::file_item; use crate::database::redis::RedisPool; use crate::models::ids::VersionId; use crate::models::pats::Scopes; @@ -621,23 +622,17 @@ pub async fn delete_file( sqlx::query!( " - DELETE FROM hashes - WHERE file_id = $1 - ", - row.id.0 + DELETE FROM versions_files + WHERE file_id = $1 + ", + &row.id.0 ) .execute(&mut *transaction) .await?; - sqlx::query!( - " - DELETE FROM files - WHERE files.id = $1 - ", - row.id.0, - ) - .execute(&mut *transaction) - .await?; + // Check if any versions_files or shared_profiles_files still reference the file- these files should not be deleted + // Delete the files that are not referenced + file_item::remove_unreferenced_files(vec![row.id], &mut transaction).await?; transaction.commit().await?; diff --git a/src/routes/v3/versions.rs b/src/routes/v3/versions.rs index 01d96142..eac1c0fc 100644 --- a/src/routes/v3/versions.rs +++ b/src/routes/v3/versions.rs @@ -516,7 +516,7 @@ pub async fn version_edit_helper( sqlx::query!( " - UPDATE files + UPDATE versions_files SET is_primary = FALSE WHERE (version_id = $1) ", @@ -527,11 +527,12 @@ pub async fn version_edit_helper( sqlx::query!( " - UPDATE files + UPDATE versions_files SET is_primary = TRUE - WHERE (id = $1) + WHERE (file_id = $1 AND version_id = $2) ", result.id, + id as database::models::ids::VersionId, ) .execute(&mut *transaction) .await?; diff --git a/tests/common/api_v3/client_profile.rs b/tests/common/api_v3/client_profile.rs new file mode 100644 index 00000000..164f2ddd --- /dev/null +++ b/tests/common/api_v3/client_profile.rs @@ -0,0 +1,318 @@ +use std::path::PathBuf; + +use actix_http::StatusCode; +use actix_web::{ + dev::ServiceResponse, + test::{self, TestRequest}, +}; +use bytes::Bytes; +use itertools::Itertools; +use labrinth::{ + models::client::profile::{ClientProfile, ClientProfileShareLink}, + routes::internal::client::profiles::ProfileDownload, + util::actix::{AppendsMultipart, MultipartSegment, MultipartSegmentData}, +}; +use serde_json::json; + +use crate::{ + assert_status, + common::{ + api_common::{request_data::ImageData, Api, AppendsOptionalPat}, + dummy_data::TestFile, + }, +}; + +use super::ApiV3; +pub struct ClientProfileOverride { + pub file_name: String, + pub install_path: String, + pub bytes: Vec, +} + +impl ClientProfileOverride { + pub fn new(test_file: TestFile, install_path: &str) -> Self { + Self { + file_name: test_file.filename(), + install_path: install_path.to_string(), + bytes: test_file.bytes(), + } + } +} + +impl ApiV3 { + pub async fn create_client_profile( + &self, + name: &str, + loader: &str, + loader_version: &str, + game_version: &str, + versions: Vec<&str>, + pat: Option<&str>, + ) -> ServiceResponse { + let req = test::TestRequest::post() + .uri("/_internal/client/profile") + .append_pat(pat) + .set_json(json!({ + "name": name, + "loader": loader, + "loader_version": loader_version, + "game": "minecraft-java", + "game_version": game_version, + "versions": versions + })) + .to_request(); + self.call(req).await + } + + #[allow(clippy::too_many_arguments)] + pub async fn edit_client_profile( + &self, + id: &str, + name: Option<&str>, + loader: Option<&str>, + loader_version: 
Option<&str>, + versions: Option>, + remove_users: Option>, + remove_links: Option>, + pat: Option<&str>, + ) -> ServiceResponse { + let req = test::TestRequest::patch() + .uri(&format!("/_internal/client/profile/{}", id)) + .append_pat(pat) + .set_json(json!({ + "name": name, + "loader": loader, + "loader_version": loader_version, + "versions": versions, + "remove_users": remove_users, + "remove_links": remove_links + })) + .to_request(); + self.call(req).await + } + + pub async fn get_client_profile(&self, id: &str, pat: Option<&str>) -> ServiceResponse { + let req = TestRequest::get() + .uri(&format!("/_internal/client/profile/{}", id)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn get_client_profile_deserialized( + &self, + id: &str, + pat: Option<&str>, + ) -> ClientProfile { + let resp = self.get_client_profile(id, pat).await; + assert_status!(&resp, StatusCode::OK); + test::read_body_json(resp).await + } + + // Like get_client_profile, but via a share id + pub async fn get_client_profile_from_share_link( + &self, + url_identifier: &str, + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::get() + .uri(&format!("/_internal/client/share/{}", url_identifier)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn get_client_profile_from_share_link_deserialized( + &self, + url_identifier: &str, + pat: Option<&str>, + ) -> ClientProfile { + let resp = self + .get_client_profile_from_share_link(url_identifier, pat) + .await; + assert_status!(&resp, StatusCode::OK); + test::read_body_json(resp).await + } + + pub async fn get_user_client_profiles(&self, pat: Option<&str>) -> ServiceResponse { + let req = TestRequest::get() + .uri("/_internal/client/user") + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn get_user_client_profiles_deserialized( + &self, + pat: Option<&str>, + ) -> Vec { + let resp = self.get_user_client_profiles(pat).await; + assert_status!(&resp, StatusCode::OK); + test::read_body_json(resp).await + } + + pub async fn delete_client_profile(&self, id: &str, pat: Option<&str>) -> ServiceResponse { + let req = TestRequest::delete() + .uri(&format!("/_internal/client/profile/{}", id)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn edit_client_profile_icon( + &self, + id: &str, + icon: Option, + pat: Option<&str>, + ) -> ServiceResponse { + if let Some(icon) = icon { + let req = TestRequest::patch() + .uri(&format!( + "/_internal/client/profile/{}/icon?ext={}", + id, icon.extension + )) + .append_pat(pat) + .set_payload(Bytes::from(icon.icon)) + .to_request(); + self.call(req).await + } else { + let req = TestRequest::delete() + .uri(&format!("/_internal/client/profile/{}/icon", id)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + } + + pub async fn add_client_profile_overrides( + &self, + id: &str, + overrides: Vec, + pat: Option<&str>, + ) -> ServiceResponse { + let mut data = Vec::new(); + let mut multipart_segments: Vec = Vec::new(); + for override_ in overrides { + data.push(serde_json::json!({ + "file_name": override_.file_name, + "install_path": override_.install_path, + })); + multipart_segments.push(MultipartSegment { + name: override_.file_name.clone(), + filename: Some(override_.file_name), + content_type: None, + data: MultipartSegmentData::Binary(override_.bytes.to_vec()), + }); + } + let multipart_segments = std::iter::once(MultipartSegment { + name: "data".to_string(), + filename: None, + content_type: 
Some("application/json".to_string()), + data: MultipartSegmentData::Text(serde_json::to_string(&data).unwrap()), + }) + .chain(multipart_segments.into_iter()) + .collect_vec(); + + let req = TestRequest::post() + .uri(&format!("/_internal/client/profile/{}/override", id)) + .append_pat(pat) + .set_multipart(multipart_segments) + .to_request(); + self.call(req).await + } + + pub async fn delete_client_profile_overrides( + &self, + id: &str, + install_paths: Option<&[&PathBuf]>, + hashes: Option<&[&str]>, + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::delete() + .uri(&format!("/_internal/client/profile/{}/override", id)) + .set_json(json!({ + "install_paths": install_paths, + "hashes": hashes + })) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn generate_client_profile_share_link( + &self, + id: &str, + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::post() + .uri(&format!("/_internal/client/profile/{}/share", id)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn generate_client_profile_share_link_deserialized( + &self, + id: &str, + pat: Option<&str>, + ) -> ClientProfileShareLink { + let resp = self.generate_client_profile_share_link(id, pat).await; + assert_status!(&resp, StatusCode::OK); + test::read_body_json(resp).await + } + + pub async fn accept_client_profile_share_link( + &self, + url_identifier: &str, + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::post() + .uri(&format!( + "/_internal/client/share/{}/accept", + url_identifier + )) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + // Get links and token + pub async fn download_client_profile_from_profile_id( + &self, + profile_id: &str, + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::get() + .uri(&format!("/_internal/client/profile/{}/files", profile_id)) + .append_pat(pat) + .to_request(); + self.call(req).await + } + + pub async fn download_client_profile_from_profile_id_deserialized( + &self, + profile_id: &str, + pat: Option<&str>, + ) -> ProfileDownload { + let resp = self + .download_client_profile_from_profile_id(profile_id, pat) + .await; + assert_status!(&resp, StatusCode::OK); + test::read_body_json(resp).await + } + + pub async fn check_download_client_profile_token( + &self, + url: &str, // Full URL, the route will parse it + pat: Option<&str>, + ) -> ServiceResponse { + let req = TestRequest::get() + .uri(&format!( + "/_internal/client/check_token?url={url}", + url = urlencoding::encode(url) + )) + .append_pat(pat) + .to_request(); + self.call(req).await + } +} diff --git a/tests/common/api_v3/mod.rs b/tests/common/api_v3/mod.rs index caab4ab6..973914ac 100644 --- a/tests/common/api_v3/mod.rs +++ b/tests/common/api_v3/mod.rs @@ -9,6 +9,7 @@ use async_trait::async_trait; use labrinth::LabrinthConfig; use std::rc::Rc; +pub mod client_profile; pub mod collections; pub mod oauth; pub mod oauth_clients; diff --git a/tests/common/database.rs b/tests/common/database.rs index cc56f025..7f1d11e2 100644 --- a/tests/common/database.rs +++ b/tests/common/database.rs @@ -18,19 +18,22 @@ pub const ADMIN_USER_ID: &str = "1"; pub const MOD_USER_ID: &str = "2"; pub const USER_USER_ID: &str = "3"; // This is the 'main' user ID, and is used for most tests. 
pub const FRIEND_USER_ID: &str = "4"; // This is exactly the same as USER_USER_ID, but could be used for testing friend-only endpoints (ie: teams, etc) -pub const ENEMY_USER_ID: &str = "5"; // This is exactly the same as USER_USER_ID, but could be used for testing friend-only endpoints (ie: teams, etc) +pub const OTHER_FRIEND_USER_ID: &str = "5"; // This is exactly the same as USER_USER_ID, but could be used for testing friend-only endpoints (ie: teams, etc) +pub const ENEMY_USER_ID: &str = "6"; // This is exactly the same as USER_USER_ID, but could be used for testing friend-only endpoints (ie: teams, etc) pub const ADMIN_USER_ID_PARSED: i64 = 1; pub const MOD_USER_ID_PARSED: i64 = 2; pub const USER_USER_ID_PARSED: i64 = 3; pub const FRIEND_USER_ID_PARSED: i64 = 4; -pub const ENEMY_USER_ID_PARSED: i64 = 5; +pub const OTHER_FRIEND_USER_ID_PARSED: i64 = 5; +pub const ENEMY_USER_ID_PARSED: i64 = 6; // These are full-scoped PATs- as if the user was logged in (including illegal scopes). pub const ADMIN_USER_PAT: Option<&str> = Some("mrp_patadmin"); pub const MOD_USER_PAT: Option<&str> = Some("mrp_patmoderator"); pub const USER_USER_PAT: Option<&str> = Some("mrp_patuser"); pub const FRIEND_USER_PAT: Option<&str> = Some("mrp_patfriend"); +pub const OTHER_FRIEND_USER_PAT: Option<&str> = Some("mrp_patotherfriend"); pub const ENEMY_USER_PAT: Option<&str> = Some("mrp_patenemy"); const TEMPLATE_DATABASE_NAME: &str = "labrinth_tests_template"; diff --git a/tests/common/dummy_data.rs b/tests/common/dummy_data.rs index b2cdcb0c..b24eac53 100644 --- a/tests/common/dummy_data.rs +++ b/tests/common/dummy_data.rs @@ -7,7 +7,7 @@ use labrinth::models::{ oauth_clients::OAuthClient, organizations::Organization, pats::Scopes, - projects::{Project, ProjectId, Version}, + projects::{Project, ProjectId, Version, VersionId}, }; use serde_json::json; use sqlx::Executor; @@ -192,6 +192,7 @@ impl DummyData { project_slug: project_alpha.slug.unwrap(), project_id_parsed: project_alpha.id, version_id: project_alpha_version.id.to_string(), + version_id_parsed: project_alpha_version.id, thread_id: project_alpha.thread_id.to_string(), file_hash: project_alpha_version.files[0].hashes["sha1"].clone(), }, @@ -202,6 +203,7 @@ impl DummyData { project_slug: project_beta.slug.unwrap(), project_id_parsed: project_beta.id, version_id: project_beta_version.id.to_string(), + version_id_parsed: project_beta_version.id, thread_id: project_beta.thread_id.to_string(), file_hash: project_beta_version.files[0].hashes["sha1"].clone(), }, @@ -232,6 +234,7 @@ pub struct DummyProjectAlpha { pub project_slug: String, pub project_id_parsed: ProjectId, pub version_id: String, + pub version_id_parsed: VersionId, pub thread_id: String, pub file_hash: String, pub team_id: String, @@ -243,6 +246,7 @@ pub struct DummyProjectBeta { pub project_slug: String, pub project_id_parsed: ProjectId, pub version_id: String, + pub version_id_parsed: VersionId, pub thread_id: String, pub file_hash: String, pub team_id: String, diff --git a/tests/files/dummy_data.sql b/tests/files/dummy_data.sql index 457353d3..6f1d3b72 100644 --- a/tests/files/dummy_data.sql +++ b/tests/files/dummy_data.sql @@ -8,7 +8,8 @@ INSERT INTO users (id, username, name, email, role) VALUES (1, 'Admin', 'Adminis INSERT INTO users (id, username, name, email, role) VALUES (2, 'Moderator', 'Moderator Test', 'moderator@modrinth.com', 'moderator'); INSERT INTO users (id, username, name, email, role) VALUES (3, 'User', 'User Test', 'user@modrinth.com', 'developer'); INSERT INTO users (id, 
username, name, email, role) VALUES (4, 'Friend', 'Friend Test', 'friend@modrinth.com', 'developer');
-INSERT INTO users (id, username, name, email, role) VALUES (5, 'Enemy', 'Enemy Test', 'enemy@modrinth.com', 'developer');
+INSERT INTO users (id, username, name, email, role) VALUES (5, 'Other_friend', 'Other Friend Test', 'otherfriend@modrinth.com', 'developer');
+INSERT INTO users (id, username, name, email, role) VALUES (6, 'Enemy', 'Enemy Test', 'enemy@modrinth.com', 'developer');
 
 -- Full PATs for each user, with different scopes
 -- These are not legal PATs, as they contain all scopes- they mimic permissions of a logged in user
@@ -17,7 +18,8 @@ INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (50,
 INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (51, 2, 'moderator-pat', 'mrp_patmoderator', $1, '2030-08-18 15:48:58.435729+00');
 INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (52, 3, 'user-pat', 'mrp_patuser', $1, '2030-08-18 15:48:58.435729+00');
 INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (53, 4, 'friend-pat', 'mrp_patfriend', $1, '2030-08-18 15:48:58.435729+00');
-INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (54, 5, 'enemy-pat', 'mrp_patenemy', $1, '2030-08-18 15:48:58.435729+00');
+INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (54, 5, 'other-friend-pat', 'mrp_patotherfriend', $1, '2030-08-18 15:48:58.435729+00');
+INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (55, 6, 'enemy-pat', 'mrp_patenemy', $1, '2030-08-18 15:48:58.435729+00');
 
 INSERT INTO loaders (id, loader) VALUES (5, 'fabric');
 INSERT INTO loaders_project_types (joining_loader_id, joining_project_type_id) VALUES (5,1);
diff --git a/tests/profiles.rs b/tests/profiles.rs
new file mode 100644
index 00000000..d872a5ac
--- /dev/null
+++ b/tests/profiles.rs
@@ -0,0 +1,1539 @@
+use std::collections::HashSet;
+use std::path::PathBuf;
+
+use actix_http::StatusCode;
+use actix_web::test;
+use common::api_v3::ApiV3;
+use common::database::*;
+use common::environment::with_test_environment;
+use common::environment::TestEnvironment;
+use labrinth::models::client::profile::ClientProfile;
+use labrinth::models::client::profile::ClientProfileMetadata;
+use labrinth::models::projects::Project;
+use labrinth::models::users::UserId;
+use sha2::Digest;
+
+use crate::common::api_common::ApiProject;
+use crate::common::api_common::ApiVersion;
+use crate::common::api_v3::client_profile::ClientProfileOverride;
+use crate::common::api_v3::request_data::get_public_project_creation_data;
+use crate::common::dummy_data::DummyImage;
+use crate::common::dummy_data::TestFile;
+
+mod common;
+
+#[actix_rt::test]
+async fn create_modify_profile() {
+    // Test setup and dummy data
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        // Create and modify a profile with certain properties
+        // Check that the properties are correct
+        let api = &test_env.api;
+        let alpha_version_id = test_env.dummy.project_alpha.version_id.to_string();
+
+        // Attempt to create a simple profile with invalid data; these should fail:
+ // - fake loader + // - fake loader version for loader + // - unparseable version (not to be confused with parseable but nonexistent version, which is simply ignored) + // - fake game version + let resp = api + .create_client_profile( + "test", + "fake-loader", + "1.0.0", + "1.20.1", + vec![], + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::BAD_REQUEST); + + // Currently fake version for loader is not checked + // let resp = api + // .create_client_profile("test", "fabric", "fake", "1.20.1", vec![], USER_USER_PAT) + // .await; + // assert_status!(&resp, StatusCode::BAD_REQUEST); + + let resp = api + .create_client_profile( + "test", + "fabric", + "1.0.0", + "1.20.1", + vec!["unparseable-version"], + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::BAD_REQUEST); + + let resp = api + .create_client_profile("test", "fabric", "1.0.0", "1.19.1", vec![], USER_USER_PAT) + .await; + assert_status!(&resp, StatusCode::BAD_REQUEST); + + // Create a simple profile + // should succeed + let profile = api + .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT) + .await; + assert_status!(&profile, StatusCode::OK); + let profile: ClientProfile = test::read_body_json(profile).await; + let id = profile.id.to_string(); + + // Get the profile and check the properties are correct + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + let updated = profile.updated; // Save this- it will update when we modify the versions/overrides + let ClientProfileMetadata::Minecraft { + game_version, + loader_version, + } = profile.game + else { + panic!("Wrong metadata type") + }; + assert_eq!(profile.name, "test"); + assert_eq!(profile.loader, "fabric"); + assert_eq!(loader_version, "1.0.0"); + assert_eq!(game_version, "1.20.1"); + assert_eq!(profile.share_links.unwrap().len(), 0); // No links yet + assert_eq!(profile.icon_url, None); + + // Modify the profile illegally in the same ways + let resp = api + .edit_client_profile( + &profile.id.to_string(), + None, + Some("fake-loader"), + None, + None, + None, + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::BAD_REQUEST); + + // TODO: Currently fake version for loader is not checked + // let resp = api + // .edit_client_profile( + // &profile.id.to_string(), + // None, + // Some("fabric"), + // Some("fake"), + // None, + // USER_USER_PAT, + // ) + // .await; + // assert_status!(&resp, StatusCode::BAD_REQUEST); + + let resp = api + .edit_client_profile( + &profile.id.to_string(), + None, + Some("fabric"), + None, + Some(vec!["unparseable-version"]), + None, + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::BAD_REQUEST); + + // Can't modify the profile as another user + let resp = api + .edit_client_profile( + &profile.id.to_string(), + None, + Some("fabric"), + None, + None, + None, + None, + FRIEND_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Get and make sure the properties are the same + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.name, "test"); + assert_eq!(profile.loader, "fabric"); + let ClientProfileMetadata::Minecraft { + game_version, + loader_version, + } = profile.game + else { + panic!("Wrong metadata type") + }; + assert_eq!(loader_version, "1.0.0"); + assert_eq!(game_version, "1.20.1"); + assert_eq!(profile.share_links.unwrap().len(), 0); + assert_eq!(profile.icon_url, None); + assert_eq!(profile.updated, updated); + + // 
A successful modification + let resp = api + .edit_client_profile( + &profile.id.to_string(), + Some("test2"), + Some("forge"), + Some("1.0.1"), + Some(vec![&alpha_version_id]), + None, + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Get the profile and check the properties + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.name, "test2"); + assert_eq!(profile.loader, "forge"); + let ClientProfileMetadata::Minecraft { + game_version, + loader_version, + } = profile.game + else { + panic!("Wrong metadata type") + }; + assert_eq!(loader_version, "1.0.1"); + assert_eq!(game_version, "1.20.1"); + assert_eq!(profile.icon_url, None); + assert!(profile.updated > updated); + let updated = profile.updated; + + // Modify the profile again + let resp = api + .edit_client_profile( + &profile.id.to_string(), + Some("test3"), + Some("fabric"), + Some("1.0.0"), + Some(vec![]), + None, + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Get the profile and check the properties + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + + assert_eq!(profile.name, "test3"); + assert_eq!(profile.loader, "fabric"); + let ClientProfileMetadata::Minecraft { + game_version, + loader_version, + } = profile.game + else { + panic!("Wrong metadata type") + }; + assert_eq!(loader_version, "1.0.0"); + assert_eq!(game_version, "1.20.1"); + assert_eq!(profile.icon_url, None); + assert!(profile.updated > updated); + }) + .await; +} + +#[actix_rt::test] +async fn accept_share_link() { + with_test_environment(None, |test_env: TestEnvironment| async move { + // Get download links for a created profile (including failure), create a share link, and create the correct number of tokens based on that + // They should expire after a time + let api = &test_env.api; + + // Create a simple profile + let profile = api + .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT) + .await; + assert_status!(&profile, StatusCode::OK); + let id = test::read_body_json::(profile) + .await + .id + .to_string(); + + // get the profile + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.share_links.unwrap().len(), 0); + let users: Vec = profile.users.unwrap(); + assert_eq!(users.len(), 1); + assert_eq!(users[0].0, USER_USER_ID_PARSED as u64); + + // Getting user's profiles should return the profile + let profiles = api + .get_user_client_profiles_deserialized(USER_USER_PAT) + .await; + assert_eq!(profiles.len(), 1); + assert_eq!(profiles[0].id.to_string(), id); + assert_eq!(profiles[0].owner_id.to_string(), USER_USER_ID); + + // Friend can't see the profile users, links, versions, install paths yet, but can see the profile + let profile = api + .get_client_profile_deserialized(&id, FRIEND_USER_PAT) + .await; + assert_eq!(profile.users, None); + + // Getting friend's profiles should not return the profile + let profiles = api + .get_user_client_profiles_deserialized(FRIEND_USER_PAT) + .await; + assert_eq!(profiles.len(), 0); + + // As 'user', try to generate a download link for the profile + let share_link = api + .generate_client_profile_share_link_deserialized(&id, USER_USER_PAT) + .await; + + // Get profile again + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.share_links.unwrap().len(), 1); // Now has a share link + + // Link is an 
'accept' link, when visited using any user token using POST, it should add the user to the profile + // As 'friend', accept the share link + let resp = api + .accept_client_profile_share_link(&share_link.id.to_string(), FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Profile users should now include the friend + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + let mut users = profile.users.unwrap(); + users.sort_by(|a, b| a.0.cmp(&b.0)); + assert_eq!(users.len(), 2); + assert_eq!(users[0].0, USER_USER_ID_PARSED as u64); + assert_eq!(users[1].0, FRIEND_USER_ID_PARSED as u64); + + // Getting friend's profiles should return the profile + let profiles = api + .get_user_client_profiles_deserialized(FRIEND_USER_PAT) + .await; + assert_eq!(profiles.len(), 1); + + // Add all of test dummy users until we hit the limit + let dummy_user_pats = [ + USER_USER_PAT, // Fails because owner (and already added) + FRIEND_USER_PAT, // Fails because already added + OTHER_FRIEND_USER_PAT, + MOD_USER_PAT, + ADMIN_USER_PAT, + ENEMY_USER_PAT, // If we add a 'max_users' field, this last test could be modified to fail + ]; + for (i, pat) in dummy_user_pats.iter().enumerate().take(4 + 1) { + let resp = api + .accept_client_profile_share_link(&share_link.id.to_string(), *pat) + .await; + if i == 0 || i == 1 { + assert_status!(&resp, StatusCode::BAD_REQUEST); + } else { + assert_status!(&resp, StatusCode::NO_CONTENT); + } + } + + // As user, remove share link + let resp = api + .edit_client_profile( + &id, + None, + None, + None, + None, + None, + Some(vec![&share_link.id.to_string()]), + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Confirm share link is gone + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.share_links.unwrap().len(), 0); + + // Friend still has the profile + let profiles = api + .get_user_client_profiles_deserialized(USER_USER_PAT) + .await; + assert_eq!(profiles.len(), 1); + let profiles = api + .get_user_client_profiles_deserialized(FRIEND_USER_PAT) + .await; + assert_eq!(profiles.len(), 1); + + // Remove friend + let resp = api + .edit_client_profile( + &id, + None, + None, + None, + None, + Some(vec![FRIEND_USER_ID]), + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Confirm friend is no longer on the profile + let profiles = api + .get_user_client_profiles_deserialized(USER_USER_PAT) + .await; + assert_eq!(profiles.len(), 1); + let profiles = api + .get_user_client_profiles_deserialized(FRIEND_USER_PAT) + .await; + assert_eq!(profiles.len(), 0); + }) + .await; +} + +#[actix_rt::test] +async fn delete_profile() { + with_test_environment(None, |test_env: TestEnvironment| async move { + // They should expire after a time + let api = &test_env.api; + + let alpha_version_id = &test_env.dummy.project_alpha.version_id.to_string(); + + // Create a simple profile + let profile = api + .create_client_profile( + "test", + "fabric", + "1.0.0", + "1.20.1", + vec![alpha_version_id], + USER_USER_PAT, + ) + .await; + assert_status!(&profile, StatusCode::OK); + let profile: ClientProfile = test::read_body_json(profile).await; + let id = profile.id.to_string(); + + // Add an override file to the profile + let resp = api + .add_client_profile_overrides( + &id, + vec![ClientProfileOverride::new( + TestFile::BasicMod, + "mods/test.jar", + )], + USER_USER_PAT, + ) + .await; + assert_status!(&resp, 
StatusCode::NO_CONTENT); + + // Invite a friend to the profile + let share_link = api + .generate_client_profile_share_link_deserialized(&id, USER_USER_PAT) + .await; + + // As friend, try to get the download links for the profile + // Not invited yet, should fail + let resp = api + .download_client_profile_from_profile_id(&id, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Accept + let resp = api + .accept_client_profile_share_link(&share_link.id.to_string(), FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Get profile from share link + let profile = api + .get_client_profile_from_share_link_deserialized( + &share_link.id.to_string(), + FRIEND_USER_PAT, + ) + .await; + + // Get a token as the friend, from the share link id + let token = api + .download_client_profile_from_profile_id_deserialized( + &profile.id.to_string(), + FRIEND_USER_PAT, + ) + .await; + + // Confirm it works + let resp = api + .check_download_client_profile_token(&token.override_cdns[0].url, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::OK); + + // Delete the profile as the friend + // Should fail + let resp = api.delete_client_profile(&id, FRIEND_USER_PAT).await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Delete the profile as the user + // Should succeed + let resp = api.delete_client_profile(&id, USER_USER_PAT).await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Confirm the profile is gone + let resp = api.get_client_profile(&id, USER_USER_PAT).await; + assert_status!(&resp, StatusCode::NOT_FOUND); + + // Confirm the token is gone + let resp = api + .check_download_client_profile_token(&token.override_cdns[0].url, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + }) + .await; +} + +#[actix_rt::test] +async fn download_profile() { + with_test_environment(None, |test_env: TestEnvironment| async move { + // Get download links for a created profile (including failure), create a share link, and create the correct number of tokens based on that + // They should expire after a time + let api = &test_env.api; + + // Create a simple profile + let profile = api + .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT) + .await; + assert_status!(&profile, StatusCode::OK); + let profile: ClientProfile = test::read_body_json(profile).await; + let id = profile.id.to_string(); + + // Add an override file to the profile + let resp = api + .add_client_profile_overrides( + &id, + vec![ClientProfileOverride::new( + TestFile::BasicMod, + "mods/test.jar", + )], + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // As 'user', try to generate a download link for the profile + let resp = api + .download_client_profile_from_profile_id(&id, USER_USER_PAT) + .await; + assert_status!(&resp, StatusCode::OK); + + // As 'friend', try to get the download links for the profile + // Not invited yet, should fail + let resp = api + .download_client_profile_from_profile_id(&id, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // As 'user', try to generate a share link for the profile, and accept it as 'friend' + let share_link = api + .generate_client_profile_share_link_deserialized(&id, USER_USER_PAT) + .await; + let resp = api + .accept_client_profile_share_link(&share_link.id.to_string(), FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // As 'friend', try to get the download links 
for the profile + // Should succeed + let mut download = api + .download_client_profile_from_profile_id_deserialized(&id, FRIEND_USER_PAT) + .await; + + // But enemy should fail + let resp = api + .download_client_profile_from_profile_id(&id, ENEMY_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Download url should be: + // - CDN url + // "custom_files" + // - hash + assert_eq!(download.override_cdns.len(), 1); + let override_file_url = download.override_cdns.remove(0).url; + let hash = format!("{:x}", sha2::Sha512::digest(&TestFile::BasicMod.bytes())); + assert_eq!( + override_file_url, + format!("{}/custom_files/{}", dotenvy::var("CDN_URL").unwrap(), hash) + ); + + // Check cloudflare helper route with a bad token (eg: the wrong user, or no user), or bad url should fail + let resp = api + .check_download_client_profile_token(&override_file_url, None) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + let resp = api + .check_download_client_profile_token(&override_file_url, ENEMY_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + let resp = api + .check_download_client_profile_token("bad_url", FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + let resp = api + .check_download_client_profile_token( + &format!( + "{}/custom_files/{}", + dotenvy::var("CDN_URL").unwrap(), + "example_hash" + ), + FRIEND_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Check cloudflare helper route to confirm this is a valid allowable access token + // We attach it as an authorization token and call the route + let resp = api + .check_download_client_profile_token(&override_file_url, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::OK); + + // As user, remove friend from profile + let resp = api + .edit_client_profile( + &id, + None, + None, + None, + None, + Some(vec![FRIEND_USER_ID]), + None, + USER_USER_PAT, + ) + .await; + assert_status!(&resp, StatusCode::NO_CONTENT); + + // Confirm friend is no longer on the profile + let profile = api + .get_client_profile_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(profile.users.unwrap().len(), 1); + + // Confirm friend can no longer download the profile + let resp = api + .download_client_profile_from_profile_id(&id, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Confirm token invalidation + let resp = api + .check_download_client_profile_token(&override_file_url, FRIEND_USER_PAT) + .await; + assert_status!(&resp, StatusCode::UNAUTHORIZED); + + // Confirm user can still download the profile + let resp = api + .download_client_profile_from_profile_id_deserialized(&id, USER_USER_PAT) + .await; + assert_eq!(resp.override_cdns.len(), 1); + }) + .await; +} + +#[actix_rt::test] +async fn add_remove_profile_icon() { + with_test_environment(None, |test_env: TestEnvironment| async move { + // Add and remove an icon from a profile + let api = &test_env.api; + + // Create a simple profile + let profile = api + .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT) + .await; + assert_status!(&profile, StatusCode::OK); + let profile: ClientProfile = test::read_body_json(profile).await; + + // Add an icon to the profile + let icon = api + .edit_client_profile_icon( + &profile.id.to_string(), + Some(DummyImage::SmallIcon.get_icon_data()), + USER_USER_PAT, + ) + .await; + assert_status!(&icon, StatusCode::NO_CONTENT); + + // Get the profile and check the icon + let 
+
+        // Get the profile and check the icon
+        let profile = api
+            .get_client_profile_deserialized(&profile.id.to_string(), USER_USER_PAT)
+            .await;
+        assert!(profile.icon_url.is_some());
+
+        // Remove the icon from the profile
+        let icon = api
+            .edit_client_profile_icon(&profile.id.to_string(), None, USER_USER_PAT)
+            .await;
+        assert_status!(&icon, StatusCode::NO_CONTENT);
+
+        // Get the profile and check the icon
+        let profile = api
+            .get_client_profile_deserialized(&profile.id.to_string(), USER_USER_PAT)
+            .await;
+        assert!(profile.icon_url.is_none());
+    })
+    .await;
+}
+
+#[actix_rt::test]
+async fn add_remove_profile_versions() {
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        // Add and remove versions from a profile
+        let api = &test_env.api;
+        let alpha_version_id = test_env.dummy.project_alpha.version_id.to_string();
+        // Create a simple profile
+        let profile = api
+            .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT)
+            .await;
+        assert_status!(&profile, StatusCode::OK);
+        let profile: ClientProfile = test::read_body_json(profile).await;
+        // Save this; it will update when we modify the versions/overrides
+        let updated = profile.updated;
+
+        // Add a hosted version to the profile
+        let resp = api
+            .edit_client_profile(
+                &profile.id.to_string(),
+                None,
+                None,
+                None,
+                Some(vec![&alpha_version_id]),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Add an override file to the profile
+        let resp = api
+            .add_client_profile_overrides(
+                &profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    TestFile::BasicMod,
+                    "mods/test.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Add a second override file to the profile
+        let resp = api
+            .add_client_profile_overrides(
+                &profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    TestFile::BasicModDifferent,
+                    "mods/test_different.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Get the profile downloads and check the versions and overrides
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads.version_ids,
+            vec![test_env.dummy.project_alpha.version_id_parsed]
+        );
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<HashSet<_>>(),
+            [
+                PathBuf::from("mods/test.jar"),
+                PathBuf::from("mods/test_different.jar")
+            ]
+            .iter()
+            .cloned()
+            .collect::<HashSet<_>>()
+        );
+
+        // Get profile again to confirm update
+        let profile = api
+            .get_client_profile_deserialized(&profile.id.to_string(), USER_USER_PAT)
+            .await;
+        assert!(profile.updated > updated);
+        let updated = profile.updated;
+
+        // Create a second profile using the same hashes, but as ENEMY_USER_PAT
+        let profile_enemy = api
+            .create_client_profile("test2", "fabric", "1.0.0", "1.20.1", vec![], ENEMY_USER_PAT)
+            .await;
+        assert_status!(&profile_enemy, StatusCode::OK);
+        let profile_enemy: ClientProfile = test::read_body_json(profile_enemy).await;
+        let id_enemy = profile_enemy.id.to_string();
+
+        // Add the same override to the enemy's profile
+        let resp = api
+            .add_client_profile_overrides(
+                &id_enemy,
+                vec![ClientProfileOverride::new(
+                    TestFile::BasicMod,
+                    "mods/test.jar",
+                )],
+                ENEMY_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
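+
+        // Both profiles now reference an override with the same sha512
+        // (TestFile::BasicMod at "mods/test.jar"). Presumably the backend
+        // deduplicates these content-addressed files into a single stored
+        // file; the deletions below rely on one profile's delete not removing
+        // the file out from under the other.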
+
+        // Get the enemy profile downloads and check the overrides
+        let profile_enemy = api
+            .download_client_profile_from_profile_id_deserialized(&id_enemy, ENEMY_USER_PAT)
+            .await;
+
+        assert_eq!(
+            profile_enemy
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<Vec<_>>(),
+            vec![PathBuf::from("mods/test.jar")]
+        );
+
+        // Attempt to delete the override test.jar from the user's profile
+        // Should succeed
+        let resp = api
+            .delete_client_profile_overrides(
+                &profile.id.to_string(),
+                Some(&[&PathBuf::from("mods/test.jar")]),
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Should still exist in the enemy's profile, but not the user's
+        let profile_enemy_downloads = api
+            .download_client_profile_from_profile_id_deserialized(&id_enemy, ENEMY_USER_PAT)
+            .await;
+        assert_eq!(
+            profile_enemy_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<Vec<_>>(),
+            vec![PathBuf::from("mods/test.jar")]
+        );
+
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<Vec<_>>(),
+            vec![PathBuf::from("mods/test_different.jar")]
+        );
+
+        // Get profile again to confirm update
+        let profile = api
+            .get_client_profile_deserialized(&profile.id.to_string(), USER_USER_PAT)
+            .await;
+        assert!(profile.updated > updated);
+        let updated = profile.updated;
+
+        // TODO: put a test here for confirming the file's existence once tests are set up to do so
+        // The file should still exist in the CDN here, as the enemy still has it
+
+        // Attempt to delete the override test_different.jar from the enemy's profile (one they don't have)
+        // Nothing should be deleted
+        // First, by path
+        let resp = api
+            .delete_client_profile_overrides(
+                &id_enemy,
+                Some(&[&PathBuf::from("mods/test_different.jar")]),
+                None,
+                ENEMY_USER_PAT,
+            )
+            .await;
+        // The route returns success even when nothing matches; it just doesn't delete anything
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Then, by hash
+        let resp = api
+            .delete_client_profile_overrides(
+                &id_enemy,
+                None,
+                Some(&[format!(
+                    "{:x}",
+                    sha2::Sha512::digest(&TestFile::BasicModDifferent.bytes())
+                )
+                .as_str()]),
+                ENEMY_USER_PAT,
+            )
+            .await;
+        // The route returns success even when nothing matches; it just doesn't delete anything
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Confirm user still has it
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<Vec<_>>(),
+            vec![PathBuf::from("mods/test_different.jar")]
+        );
+
+        // TODO: put a test here for confirming the file's existence once tests are set up to do so
+        // The file should still exist in the CDN here, as the enemy can't delete it
+
+        // Now delete the override test_different.jar from the user's profile (by hash this time)
+        // Should succeed
+        let resp = api
+            .delete_client_profile_overrides(
+                &profile.id.to_string(),
+                None,
+                Some(&[format!(
+                    "{:x}",
+                    sha2::Sha512::digest(&TestFile::BasicModDifferent.bytes())
+                )
+                .as_str()]),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Confirm user no longer has it
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<Vec<_>>(),
+            Vec::<PathBuf>::new()
+        );
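+
+        // For reference, the two deletion modes exercised above (argument
+        // order inferred from the calls in this file; the exact signature of
+        // the test helper is an assumption):
+        //   by install path:
+        //     api.delete_client_profile_overrides(&id, Some(&[&PathBuf::from("mods/x.jar")]), None, pat)
+        //   by sha512 hex hash:
+        //     api.delete_client_profile_overrides(&id, None, Some(&[hash_hex.as_str()]), pat)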
+
+        // Get profile again to confirm update
+        let profile = api
+            .get_client_profile_deserialized(&profile.id.to_string(), USER_USER_PAT)
+            .await;
+        assert!(profile.updated > updated);
+
+        // In addition, delete "alpha_version_id" from the user's profile
+        // Should succeed
+        let resp = api
+            .edit_client_profile(
+                &profile.id.to_string(),
+                None,
+                None,
+                None,
+                Some(vec![]),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Confirm user no longer has it
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(profile_downloads.version_ids, vec![]);
+    })
+    .await;
+}
+
+// Profile gotten from share link vs profile gotten from profile id should be the same
+#[actix_rt::test]
+async fn share_link_profile_same_as_profile_id_profile() {
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        // A profile fetched through a share link should be identical to the
+        // same profile fetched through its profile id
+        let api = &test_env.api;
+
+        // Create a simple profile
+        let profile = api
+            .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT)
+            .await;
+        assert_status!(&profile, StatusCode::OK);
+        let profile: ClientProfile = test::read_body_json(profile).await;
+        let id = profile.id.to_string();
+
+        // Create a share link for the profile
+        let share_link = api
+            .generate_client_profile_share_link_deserialized(&id, USER_USER_PAT)
+            .await;
+
+        // Get the profile from the share link
+        for pat in [USER_USER_PAT, FRIEND_USER_PAT].iter() {
+            let profile_from_share_link = api
+                .get_client_profile_from_share_link_deserialized(&share_link.id.to_string(), *pat)
+                .await;
+
+            let profile_from_profile_id = api.get_client_profile_deserialized(&id, *pat).await;
+
+            assert_eq!(profile_from_share_link, profile_from_profile_id);
+        }
+    })
+    .await;
+}
+
+// Cannot add versions you do not have visibility access to
+#[actix_rt::test]
+async fn hidden_versions_are_forbidden() {
+    // Test setup and dummy data
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        let api = &test_env.api;
+        let beta_version_id = test_env.dummy.project_beta.version_id.to_string();
+        let alpha_version_id = test_env.dummy.project_alpha.version_id.to_string();
+        let alpha_version_id_parsed = test_env.dummy.project_alpha.version_id_parsed;
+
+        // Create a simple profile, as FRIEND, with the beta version, which is not visible to FRIEND
+        // This should not include the beta version
+        let profile = api
+            .create_client_profile(
+                "test",
+                "fabric",
+                "1.0.0",
+                "1.20.1",
+                vec![&beta_version_id, &alpha_version_id],
+                FRIEND_USER_PAT,
+            )
+            .await;
+        assert_status!(&profile, StatusCode::OK);
+        let profile: ClientProfile = test::read_body_json(profile).await;
+        let id = profile.id.to_string();
+
+        // Get the profile and check the versions
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(&id, FRIEND_USER_PAT)
+            .await;
+        assert_eq!(profile_downloads.version_ids, vec![alpha_version_id_parsed]);
+
+        // Edit the profile, as FRIEND, to contain only the beta version, which is not visible to FRIEND
+        // The edit itself succeeds, but the hidden version should be silently dropped
+        let resp = api
+            .edit_client_profile(
+                &profile.id.to_string(),
+                None,
+                None,
+                None,
+                Some(vec![&beta_version_id]),
+                None,
+                None,
+                FRIEND_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Get the profile and check the versions
+        // Empty, because alpha is removed, and beta is not visible
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(&id, FRIEND_USER_PAT)
+            .await;
+        assert_eq!(profile_downloads.version_ids, vec![]);
+    })
+    .await;
+}
+
+#[actix_rt::test]
+async fn version_file_hash_collisions_with_shared_profiles() {
+    // Test setup and dummy data
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        let api = &test_env.api;
+
+        let test_file_hash_xxx = TestFile::build_random_jar();
+        let test_file_hash_yyy = TestFile::build_random_jar();
+        let test_file_hash_zzz = TestFile::build_random_jar();
+
+        // Define some comparison projects/profiles that already have these files
+        // unapproved project has xxx
+        let creation_data =
+            get_public_project_creation_data("unapproved", Some(test_file_hash_xxx.clone()), None);
+        let unapproved_project = api.create_project(creation_data, USER_USER_PAT).await;
+        assert_status!(&unapproved_project, StatusCode::OK);
+
+        // approved project has yyy
+        let creation_data =
+            get_public_project_creation_data("approved", Some(test_file_hash_yyy.clone()), None);
+        let approved_project = api.create_project(creation_data, USER_USER_PAT).await;
+        assert_status!(&approved_project, StatusCode::OK);
+
+        // Approve as a moderator.
+        let resp = api
+            .edit_project(
+                "approved",
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // shared profile has zzz
+        let existing_profile = api
+            .create_client_profile(
+                "existing",
+                "fabric",
+                "1.0.0",
+                "1.20.1",
+                vec![],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&existing_profile, StatusCode::OK);
+        let existing_profile: ClientProfile = test::read_body_json(existing_profile).await;
+        let resp = api
+            .add_client_profile_overrides(
+                &existing_profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    test_file_hash_zzz.clone(),
+                    "mods/test0.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        let test_data = get_public_project_creation_data("test", None, None);
+        let test_project = api.create_project(test_data, USER_USER_PAT).await;
+        assert_status!(&test_project, StatusCode::OK);
+        let project = test::read_body_json::<Project, _>(test_project).await;
+
+        let test_profile = api
+            .create_client_profile("test", "fabric", "1.0.0", "1.20.1", vec![], USER_USER_PAT)
+            .await;
+        assert_status!(&test_profile, StatusCode::OK);
+        let test_profile: ClientProfile = test::read_body_json(test_profile).await;
+
+        // 1. Existing unapproved version file, and we upload a version file with the same hash
+        // -> Should succeed; it's OK to have two unapproved version files with the same hash
+        let test_version = api
+            .add_public_version(
+                project.id,
+                "1.0.0",
+                test_file_hash_xxx.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&test_version, StatusCode::OK);
+
+        // 2. Existing approved version file, and we upload a version file with the same hash
+        // -> Should fail; cannot have two approved version files with the same hash
+        let test_version = api
+            .add_public_version(
+                project.id,
+                "1.0.1",
+                test_file_hash_yyy.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&test_version, StatusCode::BAD_REQUEST);
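+
+        // A reading aid for the remaining cases; the authoritative
+        // expectations are the assertions below:
+        //
+        //   existing file        | new upload                | expected
+        //   ---------------------|---------------------------|--------------------------------
+        //   unapproved version   | profile override          | OK, shares the file id
+        //   approved version     | profile override          | 400, attach as version instead
+        //   profile override     | profile override          | OK, shares the file id
+        //   profile override     | unapproved version file   | OK, shares the file id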
+
+        // 3. Existing unapproved version file, and we upload a shared profile override file
+        // -> Should succeed; OK, but they should attach to the same file id
+        let resp = api
+            .add_client_profile_overrides(
+                &test_profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    test_file_hash_xxx.clone(),
+                    "mods/test1.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Clean up the override we just added (by its install path)
+        let resp = api
+            .delete_client_profile_overrides(
+                &test_profile.id.to_string(),
+                Some(&[&PathBuf::from("mods/test1.jar")]),
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // 4. Existing approved version file, and we upload a shared profile override file
+        // -> Should fail; tell the user to attach it as a version instead of an override
+        let resp = api
+            .add_client_profile_overrides(
+                &test_profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    test_file_hash_yyy.clone(),
+                    "mods/test2.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::BAD_REQUEST);
+
+        // 5. Existing shared profile override file, and we upload a shared profile override file
+        // -> Should succeed, and they should attach to the same file id
+        let resp = api
+            .add_client_profile_overrides(
+                &test_profile.id.to_string(),
+                vec![ClientProfileOverride::new(
+                    test_file_hash_zzz.clone(),
+                    "mods/test3.jar",
+                )],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // 6. Existing shared profile override file, and we upload a version file (as of yet unapproved)
+        // -> Should succeed, and they should attach to the same file id
+        // The difficulty comes on approval, which is tested in 'version_file_hash_collisions_approving'
+        let test_version = api
+            .add_public_version(
+                project.id,
+                "1.0.2",
+                test_file_hash_zzz.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&test_version, StatusCode::OK);
+    })
+    .await;
+}
+
+#[actix_rt::test]
+async fn version_file_hash_collisions_approving() {
+    // Test setup and dummy data
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        let api = &test_env.api;
+        let test_file_hash_xxx = TestFile::build_random_jar();
+        let test_file_hash_yyy = TestFile::build_random_jar();
+
+        // Set up four projects with colliding hashes
+        // A: unapproved version file with XXX hash
+        // B: unapproved version file with XXX hash
+        // C: unapproved version file with YYY hash
+        // D: approved project with no versions (but will contain YYY hash)
+
+        let unapproved_project_a = api
+            .create_project(
+                get_public_project_creation_data(
+                    "unapproved_a",
+                    Some(test_file_hash_xxx.clone()),
+                    None,
+                ),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&unapproved_project_a, StatusCode::OK);
+        let unapproved_project_a = api
+            .get_project_deserialized("unapproved_a", USER_USER_PAT)
+            .await;
+
+        let unapproved_project_b = api
+            .create_project(
+                get_public_project_creation_data(
+                    "unapproved_b",
+                    Some(test_file_hash_xxx.clone()),
+                    None,
+                ),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&unapproved_project_b, StatusCode::OK);
+        let unapproved_project_b = api
+            .get_project_deserialized("unapproved_b", USER_USER_PAT)
+            .await;
+
+        let unapproved_project_c = api
+            .create_project(
+                get_public_project_creation_data(
+                    "unapproved_c",
+                    Some(test_file_hash_yyy.clone()),
+                    None,
+                ),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&unapproved_project_c, StatusCode::OK);
+        let unapproved_project_c = api
+            .get_project_deserialized("unapproved_c", USER_USER_PAT)
+            .await;
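+
+        // How the sequence below should play out, assuming a
+        // first-approved-wins hash claim (which is what the assertions check):
+        //   approve A    -> OK, A claims XXX
+        //   approve B    -> 400, XXX is already claimed
+        //   add XXX to D -> 400, XXX is already claimed
+        //   add YYY to D -> OK, D claims YYY
+        //   approve C    -> 400, YYY is already claimed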
+
+        let approved_project_d = api
+            .create_project(
+                get_public_project_creation_data("approved_d", None, None),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&approved_project_d, StatusCode::OK);
+        let approved_project_d = api
+            .get_project_deserialized("approved_d", USER_USER_PAT)
+            .await;
+
+        // Approve as a moderator.
+        let resp = api
+            .edit_project(
+                &approved_project_d.id.to_string(),
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // 1. Approve one of the projects (A), should succeed
+        let resp = api
+            .edit_project(
+                &unapproved_project_a.id.to_string(),
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // 2. Approve the other project (B), should fail - hash collision!
+        let resp = api
+            .edit_project(
+                &unapproved_project_b.id.to_string(),
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::BAD_REQUEST);
+
+        // 3. Attempt to add a version with XXX to the approved project (D), should fail - hash collision!
+        let resp = api
+            .add_public_version(
+                approved_project_d.id,
+                "1.0.0",
+                test_file_hash_xxx.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::BAD_REQUEST);
+
+        // 4. Attempt to add a version with YYY to the approved project (D), should succeed
+        let resp = api
+            .add_public_version(
+                approved_project_d.id,
+                "1.0.0",
+                test_file_hash_yyy.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::OK);
+
+        // 5. Approve the other project (C), should fail - hash collision!
+        let resp = api
+            .edit_project(
+                &unapproved_project_c.id.to_string(),
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::BAD_REQUEST);
+    })
+    .await;
+}
+
+// Has some redundant testing with version_file_hash_collisions_approving, but tests the profile side of things
+#[actix_rt::test]
+async fn version_file_hash_collisions_approving_with_profile() {
+    // Test setup and dummy data
+    with_test_environment(None, |test_env: TestEnvironment<ApiV3>| async move {
+        // Set up two projects with colliding hashes
+        // A: unapproved version file with XXX hash
+        // C: approved project with no versions (but will contain YYY hash)
+        // Also, set up a shared profile that contains overrides with the XXX hash and the YYY hash
+        let api = &test_env.api;
+        let test_file_hash_xxx = TestFile::build_random_jar();
+        let test_file_hash_yyy = TestFile::build_random_jar();
+
+        let unapproved_project_a = api
+            .create_project(
+                get_public_project_creation_data(
+                    "unapproved_a",
+                    Some(test_file_hash_xxx.clone()),
+                    None,
+                ),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&unapproved_project_a, StatusCode::OK);
+        let unapproved_project_a = api
+            .get_project_deserialized("unapproved_a", USER_USER_PAT)
+            .await;
+
+        let approved_project_c = api
+            .create_project(
+                get_public_project_creation_data("approved_c", None, None),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&approved_project_c, StatusCode::OK);
+        let approved_project_c = api
+            .get_project_deserialized("approved_c", USER_USER_PAT)
+            .await;
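+
+        // Expected end state, checked below: approving a project whose version
+        // file matches a profile override should convert that override into a
+        // hosted version on the profile, so the override CDN entry disappears
+        // and the matching version id appears instead.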
+
+        // Approve as a moderator.
+        let resp = api
+            .edit_project(
+                "approved_c",
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        let existing_profile = api
+            .create_client_profile(
+                "existing",
+                "fabric",
+                "1.0.0",
+                "1.20.1",
+                vec![],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&existing_profile, StatusCode::OK);
+        let existing_profile: ClientProfile = test::read_body_json(existing_profile).await;
+
+        // Attempt to add overrides for XXX and YYY to the shared profile, should succeed
+        let resp = api
+            .add_client_profile_overrides(
+                &existing_profile.id.to_string(),
+                vec![
+                    ClientProfileOverride::new(test_file_hash_xxx.clone(), "mods/test0.jar"),
+                    ClientProfileOverride::new(test_file_hash_yyy.clone(), "mods/test1.jar"),
+                ],
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // Approve one of the projects (A), should succeed
+        let resp = api
+            .edit_project(
+                "unapproved_a",
+                serde_json::json!({"status": "approved"}),
+                MOD_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::NO_CONTENT);
+
+        // The shared profile should have its XXX override file removed and converted to a matching hosted version
+        let version_for_a = api
+            .get_version_deserialized(&unapproved_project_a.versions[0].to_string(), USER_USER_PAT)
+            .await;
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &existing_profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<HashSet<_>>(),
+            [PathBuf::from("mods/test1.jar")]
+                .iter()
+                .cloned()
+                .collect::<HashSet<_>>()
+        );
+        assert_eq!(profile_downloads.version_ids, vec![version_for_a.id]);
+
+        // Attempt to add a version with YYY to the approved project (C), should succeed
+        let resp = api
+            .add_public_version(
+                approved_project_c.id,
+                "1.0.0",
+                test_file_hash_yyy.clone(),
+                None,
+                None,
+                USER_USER_PAT,
+            )
+            .await;
+        assert_status!(&resp, StatusCode::OK);
+
+        // Get the project again; it should have a version now
+        let approved_project_c = api
+            .get_project_deserialized(&approved_project_c.slug.unwrap(), USER_USER_PAT)
+            .await;
+
+        // The shared profile should have its YYY override file removed and converted to a matching hosted version
+        let version_for_c = api
+            .get_version_deserialized(&approved_project_c.versions[0].to_string(), USER_USER_PAT)
+            .await;
+        let profile_downloads = api
+            .download_client_profile_from_profile_id_deserialized(
+                &existing_profile.id.to_string(),
+                USER_USER_PAT,
+            )
+            .await;
+        assert_eq!(
+            profile_downloads
+                .override_cdns
+                .into_iter()
+                .map(|x| x.install_path)
+                .collect::<HashSet<_>>(),
+            HashSet::<PathBuf>::new()
+        );
+        assert_eq!(
+            profile_downloads.version_ids,
+            vec![version_for_a.id, version_for_c.id]
+        );
+    })
+    .await;
+}
+
+// TODO: Should we allow multiple overrides at the same path?
+// TODO: Potentially set up a filesystem test to ensure that the files are actually being uploaded to the CDN
diff --git a/tests/project.rs b/tests/project.rs
index 74170565..4e2cbd15 100644
--- a/tests/project.rs
+++ b/tests/project.rs
@@ -176,6 +176,18 @@ async fn test_add_remove_project() {
     assert!(project.versions.len() == 1);
     let uploaded_version_id = project.versions[0];
+    // Approve the project, which 'claims' the hash
+    let resp = api
+        .edit_project(
+            "demo",
+            json!({
+                "status": "approved",
+            }),
+            MOD_USER_PAT,
+        )
+        .await;
+    assert_status!(&resp, StatusCode::NO_CONTENT);
+
     // Checks files to ensure they were uploaded and correctly identify the file
     let hash = sha1::Sha1::from(basic_mod_file.bytes())
         .digest()
         .to_string();
@@ -190,7 +202,7 @@
     let resp = api
         .create_project(
             ProjectCreationRequestData {
-                slug: "demo".to_string(),
+                slug: "".to_string(), // Slug not needed at this point
                 segment_data: vec![
                     json_diff_slug_file_segment.clone(),
                     file_diff_name_segment.clone(),
@@ -206,7 +218,7 @@
     let resp = api
         .create_project(
             ProjectCreationRequestData {
-                slug: "demo".to_string(),
+                slug: "".to_string(), // Slug not needed at this point
                 segment_data: vec![
                     json_diff_file_segment.clone(),
                     file_diff_name_content_segment.clone(),
@@ -222,7 +234,7 @@
     let resp = api
         .create_project(
             ProjectCreationRequestData {
-                slug: "demo".to_string(),
+                slug: "".to_string(), // Slug not needed at this point
                 segment_data: vec![
                     json_diff_slug_file_segment.clone(),
                     file_diff_name_content_segment.clone(),
diff --git a/tests/scopes.rs b/tests/scopes.rs
index 7ebc637b..7d012837 100644
--- a/tests/scopes.rs
+++ b/tests/scopes.rs
@@ -379,6 +379,29 @@ pub async fn project_version_reads_scopes() {
         .await
         .unwrap();
 
+    // As moderator, approve the project then set it to private
+    // We need to approve it to claim the hash for 'get_version_from_hash' and such to work.
+    // We end with private, and the version remains 'draft' later so we can test the scope.
+    let resp = test_env
+        .api
+        .edit_project(
+            beta_project_id,
+            json!({ "status": "approved" }),
+            MOD_USER_PAT,
+        )
+        .await;
+    assert_status!(&resp, StatusCode::NO_CONTENT);
+
+    let resp = test_env
+        .api
+        .edit_project(
+            beta_project_id,
+            json!({ "status": "private" }),
+            MOD_USER_PAT,
+        )
+        .await;
+    assert_status!(&resp, StatusCode::NO_CONTENT);
+
     // Version reading
     // First, set version to hidden (which is when the scope is required to read it)
     let read_version = Scopes::VERSION_READ;
diff --git a/tests/v2/project.rs b/tests/v2/project.rs
index 1352ee1f..dcef8d2b 100644
--- a/tests/v2/project.rs
+++ b/tests/v2/project.rs
@@ -6,7 +6,8 @@ use crate::{
     api_common::{ApiProject, ApiVersion, AppendsOptionalPat},
     api_v2::{request_data::get_public_project_creation_data_json, ApiV2},
     database::{
-        generate_random_name, ADMIN_USER_PAT, FRIEND_USER_ID, FRIEND_USER_PAT, USER_USER_PAT,
+        generate_random_name, ADMIN_USER_PAT, FRIEND_USER_ID, FRIEND_USER_PAT, MOD_USER_PAT,
+        USER_USER_PAT,
     },
     dummy_data::TestFile,
     environment::{with_test_environment, TestEnvironment},
@@ -174,6 +175,18 @@ async fn test_add_remove_project() {
     assert!(project.versions.len() == 1);
     let uploaded_version_id = project.versions[0];
+    // Approve the project, which 'claims' the hash
+    let resp = api
+        .edit_project(
+            "demo",
+            json!({
+                "status": "approved",
+            }),
+            MOD_USER_PAT,
+        )
+        .await;
+    assert_status!(&resp, StatusCode::NO_CONTENT);
+
     // Checks files to ensure they were uploaded and correctly identify the file
     let hash = sha1::Sha1::from(basic_mod_file.bytes())
         .digest()
         .to_string();
diff --git a/tests/v2/version.rs b/tests/v2/version.rs
index c4eceea1..770f2ad4 100644
--- a/tests/v2/version.rs
+++ b/tests/v2/version.rs
@@ -145,7 +145,6 @@ async fn version_updates() {
         ..
     } = &test_env.dummy.project_alpha;
     let DummyProjectBeta {
-        version_id: beta_version_id,
         file_hash: beta_version_hash,
         ..
     } = &test_env.dummy.project_beta;
@@ -164,12 +163,11 @@ async fn version_updates() {
             USER_USER_PAT,
         )
         .await;
-    assert_eq!(versions.len(), 2);
+    assert_eq!(versions.len(), 1); // Beta version should not be returned: not approved yet, so it hasn't claimed its hash
     assert_eq!(
         &versions[alpha_version_hash].id.to_string(),
         alpha_version_id
     );
-    assert_eq!(&versions[beta_version_hash].id.to_string(), beta_version_id);
 
     // When there is only the one version, there should be no updates
     let version = api
diff --git a/tests/version.rs b/tests/version.rs
index de587831..a235f86a 100644
--- a/tests/version.rs
+++ b/tests/version.rs
@@ -6,6 +6,8 @@ use crate::common::dummy_data::{DummyProjectAlpha, DummyProjectBeta, TestFile};
 use crate::common::get_json_val_str;
 use actix_http::StatusCode;
 use actix_web::test;
+use common::api_common::ApiProject;
+use common::api_v3::request_data::get_public_project_creation_data;
 use common::api_v3::ApiV3;
 use common::asserts::assert_common_version_ids;
 use common::database::USER_USER_PAT;
@@ -14,7 +16,7 @@ use futures::StreamExt;
 use labrinth::database::models::version_item::VERSIONS_NAMESPACE;
 use labrinth::models::ids::base62_impl::parse_base62;
 use labrinth::models::projects::{
-    Dependency, DependencyType, VersionId, VersionStatus, VersionType,
+    Dependency, DependencyType, Project, VersionId, VersionStatus, VersionType,
 };
 use labrinth::routes::v3::version_file::FileUpdateData;
 use serde_json::json;
@@ -81,6 +83,142 @@ async fn test_get_version() {
         .await;
 }
 
+#[actix_rt::test]
+async fn version_updates_non_public_is_nothing() {
+    // Test setup and dummy data
+    with_test_environment(
+        None,
+        |test_env: common::environment::TestEnvironment<ApiV3>| async move {
+            // Confirm that hash-finding functions return nothing for versions attached to non-public projects
+            // This is necessary because hashes no longer uniquely identify versions, and these functions expect to be able to find a single version from a hash
+            // This is the case even if we are the owner of the project/versions
+            let api = &test_env.api;
+
+            // First, create project gamma, which is not yet approved (and so not publicly visible)
+            let gamma_creation_data = get_public_project_creation_data("gamma", None, None);
+            let gamma_project = api.create_project(gamma_creation_data, USER_USER_PAT).await;
+            let gamma_project: Project = test::read_body_json(gamma_project).await;
+
+            let mut sha1_hashes = vec![];
+            // Create 5 version files, and add each of them to both beta and gamma
+            for i in 0..5 {
+                let file = TestFile::build_random_jar();
+                let version = api
+                    .add_public_version_deserialized(
+                        gamma_project.id,
+                        &format!("1.2.3.{}", i),
+                        file.clone(),
+                        None,
+                        None,
+                        USER_USER_PAT,
+                    )
+                    .await;
+
+                api.add_public_version_deserialized(
+                    test_env.dummy.project_beta.project_id_parsed,
+                    &format!("1.2.3.{}", i),
+                    file.clone(),
+                    None,
+                    None,
+                    USER_USER_PAT,
+                )
+                .await;
+
+                sha1_hashes.push(version.files[0].hashes["sha1"].clone());
+            }
+
+            for approved in [false, true] {
+                // get_version_from_hash
+                let resp = api
+                    .get_version_from_hash(&sha1_hashes[0], "sha1", USER_USER_PAT)
+                    .await;
+                if approved {
+                    assert_status!(&resp, StatusCode::OK);
+                } else {
+                    assert_status!(&resp, StatusCode::NOT_FOUND);
+                }
+
+                // get_versions_from_hashes
+                let resp = api
+                    .get_versions_from_hashes(&[&sha1_hashes[0]], "sha1", USER_USER_PAT)
+                    .await;
+                assert_status!(&resp, StatusCode::OK);
+                let body: HashMap<String, serde_json::Value> = test::read_body_json(resp).await;
+                if approved {
+                    assert_eq!(body.len(), 1);
+                } else {
+                    assert_eq!(body.len(), 0);
+                }
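+
+                // Note the asymmetry checked here: the single-hash route 404s
+                // when the hash is not visible, while the batch routes return
+                // 200 OK with the unmatched hash simply absent from the map.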
+
+                // get_update_from_hash
+                let resp = api
+                    .get_update_from_hash(&sha1_hashes[0], "sha1", None, None, None, USER_USER_PAT)
+                    .await;
+                if approved {
+                    assert_status!(&resp, StatusCode::OK);
+                } else {
+                    assert_status!(&resp, StatusCode::NOT_FOUND);
+                }
+
+                // update_files
+                let resp = api
+                    .update_files(
+                        "sha1",
+                        vec![sha1_hashes[0].clone()],
+                        None,
+                        None,
+                        None,
+                        USER_USER_PAT,
+                    )
+                    .await;
+                assert_status!(&resp, StatusCode::OK);
+                let body: HashMap<String, serde_json::Value> = test::read_body_json(resp).await;
+                if approved {
+                    assert_eq!(body.len(), 1);
+                } else {
+                    assert_eq!(body.len(), 0);
+                }
+
+                // update_individual_files
+                let hashes = vec![FileUpdateData {
+                    hash: sha1_hashes[0].clone(),
+                    loaders: None,
+                    loader_fields: None,
+                    version_types: None,
+                }];
+                let resp = api
+                    .update_individual_files("sha1", hashes, USER_USER_PAT)
+                    .await;
+                assert_status!(&resp, StatusCode::OK);
+
+                let body: HashMap<String, serde_json::Value> = test::read_body_json(resp).await;
+                if approved {
+                    assert_eq!(body.len(), 1);
+                } else {
+                    assert_eq!(body.len(), 0);
+                }
+
+                // Now, make the project public for the next loop, and confirm that the functions work
+                if !approved {
+                    api.edit_project(
+                        &gamma_project.id.to_string(),
+                        json!({
+                            "status": "approved",
+                        }),
+                        MOD_USER_PAT,
+                    )
+                    .await;
+                }
+            }
+        },
+    )
+    .await;
+}
+
 #[actix_rt::test]
 async fn version_updates() {
     // Test setup and dummy data
@@ -96,7 +234,6 @@
         ..
     } = &test_env.dummy.project_alpha;
     let DummyProjectBeta {
-        version_id: beta_version_id,
         file_hash: beta_version_hash,
         ..
     } = &test_env.dummy.project_beta;
@@ -119,12 +256,11 @@
             USER_USER_PAT,
         )
         .await;
-    assert_eq!(versions.len(), 2);
+    assert_eq!(versions.len(), 1); // Beta version should not be returned: not approved yet, so it hasn't claimed its hash
     assert_eq!(
         &versions[alpha_version_hash].id.to_string(),
         alpha_version_id
     );
-    assert_eq!(&versions[beta_version_hash].id.to_string(), beta_version_id);
 
     // When there is only the one version, there should be no updates
     let version = api
@@ -182,11 +318,12 @@
     ]
     .iter()
     {
+        let file = TestFile::build_random_jar();
         let version = api
             .add_public_version_deserialized(
                 *alpha_project_id_parsed,
                 version_number,
-                TestFile::build_random_jar(),
+                file.clone(),
                 None,
                 None,
                 USER_USER_PAT,