This adds support for the upcoming aes256k key type in CephX that is

based on Kerberos 5 and brings a bunch of assorted CephFS fixes from
 Ethan and Sam.  One of Sam's patches in particular undoes a change in
 the fscrypt area that had an inadvertent side effect of making CephFS
 behave as if mounted with wsize=4096 and leading to the corresponding
 degradation in performance, especially for sequential writes.
 -----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEydHwtzie9C7TfviiSn/eOAIR84sFAmmUpbQTHGlkcnlvbW92
 QGdtYWlsLmNvbQAKCRBKf944AhHziy+iB/9oWArHfGu/OLbmb+gQEikcGVmzr9r/
 XE3Pcp6JQUMUf8mlOf18RdWn+ak509jQcnJDSyXzk+mHBOw/+VwPod3bZZGNHcYw
 RwaUAWh9r79Bm0FnUewfQguj2FFnW1X4SrBrGCqsl/yOXbzHAGvDVzsoditfSB+J
 8NPYJeFOk6VpRx5Qie66t2wwUoI/VtGs++D9R0CWEy1EpROH/nRkcTk7KlnfSIV0
 FWSItUmssxp7Gm67O12390PxC0ZfQ6ApPNl5UOVkL7kfjqYsQKY948qlsTFHHFiM
 M58fGysAfsfTCXuFWjnmTGhLubV2d9fdIN8OjYFaOjpXeJQ6WRAg8nbe
 =jx2K
 -----END PGP SIGNATURE-----

Merge tag 'ceph-for-7.0-rc1' of https://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:
 "This adds support for the upcoming aes256k key type in CephX that is
  based on Kerberos 5 and brings a bunch of assorted CephFS fixes from
  Ethan and Sam. One of Sam's patches in particular undoes a change in
  the fscrypt area that had an inadvertent side effect of making CephFS
  behave as if mounted with wsize=4096 and leading to the corresponding
  degradation in performance, especially for sequential writes"

* tag 'ceph-for-7.0-rc1' of https://github.com/ceph/ceph-client:
  ceph: assert loop invariants in ceph_writepages_start()
  ceph: remove error return from ceph_process_folio_batch()
  ceph: fix write storm on fscrypted files
  ceph: do not propagate page array emplacement errors as batch errors
  ceph: supply snapshot context in ceph_uninline_data()
  ceph: supply snapshot context in ceph_zero_partial_object()
  libceph: adapt ceph_x_challenge_blob hashing and msgr1 message signing
  libceph: add support for CEPH_CRYPTO_AES256KRB5
  libceph: introduce ceph_crypto_key_prepare()
  libceph: generalize ceph_x_encrypt_offset() and ceph_x_encrypt_buflen()
  libceph: define and enforce CEPH_MAX_KEY_LEN
This commit is contained in:
Linus Torvalds 2026-02-17 15:18:51 -08:00
commit 87a367f1bf
9 changed files with 482 additions and 135 deletions

View file

@@ -1000,7 +1000,8 @@ unsigned int ceph_define_write_size(struct address_space *mapping)
{
struct inode *inode = mapping->host;
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
unsigned int wsize = i_blocksize(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
unsigned int wsize = ci->i_layout.stripe_unit;
if (fsc->mount_options->wsize < wsize)
wsize = fsc->mount_options->wsize;
@@ -1283,16 +1284,16 @@ static inline int move_dirty_folio_in_page_array(struct address_space *mapping,
}
static
int ceph_process_folio_batch(struct address_space *mapping,
struct writeback_control *wbc,
struct ceph_writeback_ctl *ceph_wbc)
void ceph_process_folio_batch(struct address_space *mapping,
struct writeback_control *wbc,
struct ceph_writeback_ctl *ceph_wbc)
{
struct inode *inode = mapping->host;
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct folio *folio = NULL;
unsigned i;
int rc = 0;
int rc;
for (i = 0; can_next_page_be_processed(ceph_wbc, i); i++) {
folio = ceph_wbc->fbatch.folios[i];
@@ -1322,12 +1323,10 @@ int ceph_process_folio_batch(struct address_space *mapping,
rc = ceph_check_page_before_write(mapping, wbc,
ceph_wbc, folio);
if (rc == -ENODATA) {
rc = 0;
folio_unlock(folio);
ceph_wbc->fbatch.folios[i] = NULL;
continue;
} else if (rc == -E2BIG) {
rc = 0;
folio_unlock(folio);
ceph_wbc->fbatch.folios[i] = NULL;
break;
@@ -1379,8 +1378,6 @@ int ceph_process_folio_batch(struct address_space *mapping,
}
ceph_wbc->processed_in_fbatch = i;
return rc;
}
static inline
@@ -1666,7 +1663,9 @@ retry:
tag_pages_for_writeback(mapping, ceph_wbc.index, ceph_wbc.end);
while (!has_writeback_done(&ceph_wbc)) {
ceph_wbc.locked_pages = 0;
BUG_ON(ceph_wbc.locked_pages);
BUG_ON(ceph_wbc.pages);
ceph_wbc.max_pages = ceph_wbc.wsize >> PAGE_SHIFT;
get_more_pages:
@@ -1684,10 +1683,8 @@ get_more_pages:
break;
process_folio_batch:
rc = ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
ceph_process_folio_batch(mapping, wbc, &ceph_wbc);
ceph_shift_unused_folios_left(&ceph_wbc.fbatch);
if (rc)
goto release_folios;
/* did we get anything? */
if (!ceph_wbc.locked_pages)
@@ -2199,6 +2196,7 @@ int ceph_uninline_data(struct file *file)
struct ceph_osd_request *req = NULL;
struct ceph_cap_flush *prealloc_cf = NULL;
struct folio *folio = NULL;
struct ceph_snap_context *snapc = NULL;
u64 inline_version = CEPH_INLINE_NONE;
struct page *pages[1];
int err = 0;
@@ -2226,6 +2224,24 @@ int ceph_uninline_data(struct file *file)
if (inline_version == 1) /* initial version, no data */
goto out_uninline;
down_read(&fsc->mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
if (__ceph_have_pending_cap_snap(ci)) {
struct ceph_cap_snap *capsnap =
list_last_entry(&ci->i_cap_snaps,
struct ceph_cap_snap,
ci_item);
snapc = ceph_get_snap_context(capsnap->context);
} else {
if (!ci->i_head_snapc) {
ci->i_head_snapc = ceph_get_snap_context(
ci->i_snap_realm->cached_context);
}
snapc = ceph_get_snap_context(ci->i_head_snapc);
}
spin_unlock(&ci->i_ceph_lock);
up_read(&fsc->mdsc->snap_rwsem);
folio = read_mapping_folio(inode->i_mapping, 0, file);
if (IS_ERR(folio)) {
err = PTR_ERR(folio);
@@ -2241,7 +2257,7 @@ int ceph_uninline_data(struct file *file)
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode), 0, &len, 0, 1,
CEPH_OSD_OP_CREATE, CEPH_OSD_FLAG_WRITE,
NULL, 0, 0, false);
snapc, 0, 0, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
@@ -2257,7 +2273,7 @@ int ceph_uninline_data(struct file *file)
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode), 0, &len, 1, 3,
CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
NULL, ci->i_truncate_seq,
snapc, ci->i_truncate_seq,
ci->i_truncate_size, false);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -2320,6 +2336,7 @@ out_unlock:
folio_put(folio);
}
out:
ceph_put_snap_context(snapc);
ceph_free_cap_flush(prealloc_cf);
doutc(cl, "%llx.%llx inline_version %llu = %d\n",
ceph_vinop(inode), inline_version, err);

View file

@@ -2568,6 +2568,7 @@ static int ceph_zero_partial_object(struct inode *inode,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_osd_request *req;
struct ceph_snap_context *snapc;
int ret = 0;
loff_t zero = 0;
int op;
@@ -2582,12 +2583,25 @@
op = CEPH_OSD_OP_ZERO;
}
spin_lock(&ci->i_ceph_lock);
if (__ceph_have_pending_cap_snap(ci)) {
struct ceph_cap_snap *capsnap =
list_last_entry(&ci->i_cap_snaps,
struct ceph_cap_snap,
ci_item);
snapc = ceph_get_snap_context(capsnap->context);
} else {
BUG_ON(!ci->i_head_snapc);
snapc = ceph_get_snap_context(ci->i_head_snapc);
}
spin_unlock(&ci->i_ceph_lock);
req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
ceph_vino(inode),
offset, length,
0, 1, op,
CEPH_OSD_FLAG_WRITE,
NULL, 0, 0, false);
snapc, 0, 0, false);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
goto out;
@@ -2601,6 +2615,7 @@ static int ceph_zero_partial_object(struct inode *inode,
ceph_osdc_put_request(req);
out:
ceph_put_snap_context(snapc);
return ret;
}

View file

@@ -89,8 +89,9 @@ struct ceph_dir_layout {
} __attribute__ ((packed));
/* crypto algorithms */
#define CEPH_CRYPTO_NONE 0x0
#define CEPH_CRYPTO_AES 0x1
#define CEPH_CRYPTO_NONE 0x0
#define CEPH_CRYPTO_AES 0x1
#define CEPH_CRYPTO_AES256KRB5 0x2 /* AES256-CTS-HMAC384-192 */
#define CEPH_AES_IV "cephsageyudagreg"

View file

@@ -6,6 +6,7 @@ config CEPH_LIB
select CRYPTO_AES
select CRYPTO_CBC
select CRYPTO_GCM
select CRYPTO_KRB5
select CRYPTO_LIB_SHA256
select CRYPTO
select KEYS

View file

@@ -17,6 +17,22 @@
#include "auth_x.h"
#include "auth_x_protocol.h"
static const u32 ticket_key_usages[] = {
CEPHX_KEY_USAGE_TICKET_SESSION_KEY,
CEPHX_KEY_USAGE_TICKET_BLOB,
CEPHX_KEY_USAGE_AUTH_CONNECTION_SECRET
};
static const u32 authorizer_key_usages[] = {
CEPHX_KEY_USAGE_AUTHORIZE,
CEPHX_KEY_USAGE_AUTHORIZE_CHALLENGE,
CEPHX_KEY_USAGE_AUTHORIZE_REPLY
};
static const u32 client_key_usages[] = {
CEPHX_KEY_USAGE_TICKET_SESSION_KEY
};
static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
@@ -44,28 +60,41 @@ static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
return !!need;
}
static int ceph_x_encrypt_offset(void)
static int __ceph_x_encrypt_offset(const struct ceph_crypto_key *key)
{
return sizeof(u32) + sizeof(struct ceph_x_encrypt_header);
return ceph_crypt_data_offset(key) +
sizeof(struct ceph_x_encrypt_header);
}
static int ceph_x_encrypt_buflen(int ilen)
static int ceph_x_encrypt_offset(const struct ceph_crypto_key *key)
{
return ceph_x_encrypt_offset() + ilen + 16;
return sizeof(u32) + __ceph_x_encrypt_offset(key);
}
static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
int buf_len, int plaintext_len)
/*
* AES: ciphertext_len | hdr | data... | padding
* AES256KRB5: ciphertext_len | confounder | hdr | data... | hmac
*/
static int ceph_x_encrypt_buflen(const struct ceph_crypto_key *key,
int data_len)
{
struct ceph_x_encrypt_header *hdr = buf + sizeof(u32);
int encrypt_len = sizeof(struct ceph_x_encrypt_header) + data_len;
return sizeof(u32) + ceph_crypt_buflen(key, encrypt_len);
}
static int ceph_x_encrypt(const struct ceph_crypto_key *key, int usage_slot,
void *buf, int buf_len, int plaintext_len)
{
struct ceph_x_encrypt_header *hdr;
int ciphertext_len;
int ret;
hdr = buf + sizeof(u32) + ceph_crypt_data_offset(key);
hdr->struct_v = 1;
hdr->magic = cpu_to_le64(CEPHX_ENC_MAGIC);
ret = ceph_crypt(secret, true, buf + sizeof(u32), buf_len - sizeof(u32),
plaintext_len + sizeof(struct ceph_x_encrypt_header),
ret = ceph_crypt(key, usage_slot, true, buf + sizeof(u32),
buf_len - sizeof(u32), plaintext_len + sizeof(*hdr),
&ciphertext_len);
if (ret)
return ret;
@@ -74,18 +103,19 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret, void *buf,
return sizeof(u32) + ciphertext_len;
}
static int __ceph_x_decrypt(struct ceph_crypto_key *secret, void *p,
int ciphertext_len)
static int __ceph_x_decrypt(const struct ceph_crypto_key *key, int usage_slot,
void *p, int ciphertext_len)
{
struct ceph_x_encrypt_header *hdr = p;
struct ceph_x_encrypt_header *hdr;
int plaintext_len;
int ret;
ret = ceph_crypt(secret, false, p, ciphertext_len, ciphertext_len,
&plaintext_len);
ret = ceph_crypt(key, usage_slot, false, p, ciphertext_len,
ciphertext_len, &plaintext_len);
if (ret)
return ret;
hdr = p + ceph_crypt_data_offset(key);
if (le64_to_cpu(hdr->magic) != CEPHX_ENC_MAGIC) {
pr_err("%s bad magic\n", __func__);
return -EINVAL;
@@ -94,7 +124,8 @@ static int __ceph_x_decrypt(struct ceph_crypto_key *secret, void *p,
return plaintext_len - sizeof(*hdr);
}
static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
static int ceph_x_decrypt(const struct ceph_crypto_key *key, int usage_slot,
void **p, void *end)
{
int ciphertext_len;
int ret;
@@ -102,7 +133,7 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret, void **p, void *end)
ceph_decode_32_safe(p, end, ciphertext_len, e_inval);
ceph_decode_need(p, end, ciphertext_len, e_inval);
ret = __ceph_x_decrypt(secret, *p, ciphertext_len);
ret = __ceph_x_decrypt(key, usage_slot, *p, ciphertext_len);
if (ret < 0)
return ret;
@@ -193,8 +224,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
}
/* blob for me */
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(secret, p, end);
dp = *p + ceph_x_encrypt_offset(secret);
ret = ceph_x_decrypt(secret,
0 /* CEPHX_KEY_USAGE_TICKET_SESSION_KEY */,
p, end);
if (ret < 0)
goto out;
dout(" decrypted %d bytes\n", ret);
@@ -208,6 +241,11 @@ static int process_one_ticket(struct ceph_auth_client *ac,
if (ret)
goto out;
ret = ceph_crypto_key_prepare(&new_session_key, ticket_key_usages,
ARRAY_SIZE(ticket_key_usages));
if (ret)
goto out;
ceph_decode_need(&dp, dend, sizeof(struct ceph_timespec), bad);
ceph_decode_timespec64(&validity, dp);
dp += sizeof(struct ceph_timespec);
@@ -220,8 +258,10 @@ static int process_one_ticket(struct ceph_auth_client *ac,
ceph_decode_8_safe(p, end, is_enc, bad);
if (is_enc) {
/* encrypted */
tp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(&th->session_key, p, end);
tp = *p + ceph_x_encrypt_offset(&th->session_key);
ret = ceph_x_decrypt(&th->session_key,
1 /* CEPHX_KEY_USAGE_TICKET_BLOB */,
p, end);
if (ret < 0)
goto out;
dout(" encrypted ticket, decrypted %d bytes\n", ret);
@@ -312,7 +352,7 @@ static int encrypt_authorizer(struct ceph_x_authorizer *au,
p = (void *)(msg_a + 1) + le32_to_cpu(msg_a->ticket_blob.blob_len);
end = au->buf->vec.iov_base + au->buf->vec.iov_len;
msg_b = p + ceph_x_encrypt_offset();
msg_b = p + ceph_x_encrypt_offset(&au->session_key);
msg_b->struct_v = 2;
msg_b->nonce = cpu_to_le64(au->nonce);
if (server_challenge) {
@@ -324,7 +364,9 @@
msg_b->server_challenge_plus_one = 0;
}
ret = ceph_x_encrypt(&au->session_key, p, end - p, sizeof(*msg_b));
ret = ceph_x_encrypt(&au->session_key,
0 /* CEPHX_KEY_USAGE_AUTHORIZE */,
p, end - p, sizeof(*msg_b));
if (ret < 0)
return ret;
@@ -367,8 +409,13 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
if (ret)
goto out_au;
ret = ceph_crypto_key_prepare(&au->session_key, authorizer_key_usages,
ARRAY_SIZE(authorizer_key_usages));
if (ret)
goto out_au;
maxlen = sizeof(*msg_a) + ticket_blob_len +
ceph_x_encrypt_buflen(sizeof(*msg_b));
ceph_x_encrypt_buflen(&au->session_key, sizeof(*msg_b));
dout(" need len %d\n", maxlen);
if (au->buf && au->buf->alloc_len < maxlen) {
ceph_buffer_put(au->buf);
@@ -506,8 +553,7 @@ static int ceph_x_build_request(struct ceph_auth_client *ac,
if (need & CEPH_ENTITY_TYPE_AUTH) {
struct ceph_x_authenticate *auth = (void *)(head + 1);
void *enc_buf = xi->auth_authorizer.enc_buf;
struct ceph_x_challenge_blob *blob = enc_buf +
ceph_x_encrypt_offset();
struct ceph_x_challenge_blob *blob;
u64 *u;
p = auth + 1;
@@ -517,14 +563,29 @@
dout(" get_auth_session_key\n");
head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY);
/* encrypt and hash */
if (xi->secret.type == CEPH_CRYPTO_AES) {
blob = enc_buf + ceph_x_encrypt_offset(&xi->secret);
} else {
BUILD_BUG_ON(SHA256_DIGEST_SIZE + sizeof(*blob) >
CEPHX_AU_ENC_BUF_LEN);
blob = enc_buf + SHA256_DIGEST_SIZE;
}
get_random_bytes(&auth->client_challenge, sizeof(u64));
blob->client_challenge = auth->client_challenge;
blob->server_challenge = cpu_to_le64(xi->server_challenge);
ret = ceph_x_encrypt(&xi->secret, enc_buf, CEPHX_AU_ENC_BUF_LEN,
sizeof(*blob));
if (ret < 0)
return ret;
if (xi->secret.type == CEPH_CRYPTO_AES) {
ret = ceph_x_encrypt(&xi->secret, 0 /* dummy */,
enc_buf, CEPHX_AU_ENC_BUF_LEN,
sizeof(*blob));
if (ret < 0)
return ret;
} else {
ceph_hmac_sha256(&xi->secret, blob, sizeof(*blob),
enc_buf);
ret = SHA256_DIGEST_SIZE;
}
auth->struct_v = 3; /* nautilus+ */
auth->key = 0;
@@ -634,8 +695,10 @@ static int handle_auth_session_key(struct ceph_auth_client *ac, u64 global_id,
ceph_decode_need(p, end, len, e_inval);
dout("%s connection secret blob len %d\n", __func__, len);
if (len > 0) {
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(&th->session_key, p, *p + len);
dp = *p + ceph_x_encrypt_offset(&th->session_key);
ret = ceph_x_decrypt(&th->session_key,
2 /* CEPHX_KEY_USAGE_AUTH_CONNECTION_SECRET */,
p, *p + len);
if (ret < 0)
return ret;
@@ -799,12 +862,14 @@ static int decrypt_authorizer_challenge(struct ceph_crypto_key *secret,
int ret;
/* no leading len */
ret = __ceph_x_decrypt(secret, challenge, challenge_len);
ret = __ceph_x_decrypt(secret,
1 /* CEPHX_KEY_USAGE_AUTHORIZE_CHALLENGE */,
challenge, challenge_len);
if (ret < 0)
return ret;
dout("%s decrypted %d bytes\n", __func__, ret);
dp = challenge + sizeof(struct ceph_x_encrypt_header);
dp = challenge + __ceph_x_encrypt_offset(secret);
dend = dp + ret;
ceph_decode_skip_8(&dp, dend, e_inval); /* struct_v */
@@ -851,8 +916,9 @@ static int decrypt_authorizer_reply(struct ceph_crypto_key *secret,
u8 struct_v;
int ret;
dp = *p + ceph_x_encrypt_offset();
ret = ceph_x_decrypt(secret, p, end);
dp = *p + ceph_x_encrypt_offset(secret);
ret = ceph_x_decrypt(secret, 2 /* CEPHX_KEY_USAGE_AUTHORIZE_REPLY */,
p, end);
if (ret < 0)
return ret;
@@ -974,7 +1040,8 @@ static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
__le32 front_crc;
__le32 middle_crc;
__le32 data_crc;
} __packed *sigblock = enc_buf + ceph_x_encrypt_offset();
} __packed *sigblock = enc_buf +
ceph_x_encrypt_offset(&au->session_key);
sigblock->len = cpu_to_le32(4*sizeof(u32));
sigblock->header_crc = msg->hdr.crc;
@@ -982,8 +1049,9 @@ static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
sigblock->middle_crc = msg->footer.middle_crc;
sigblock->data_crc = msg->footer.data_crc;
ret = ceph_x_encrypt(&au->session_key, enc_buf,
CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock));
ret = ceph_x_encrypt(&au->session_key, 0 /* dummy */,
enc_buf, CEPHX_AU_ENC_BUF_LEN,
sizeof(*sigblock));
if (ret < 0)
return ret;
@@ -998,11 +1066,19 @@ static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
__le32 data_crc;
__le32 data_len;
__le32 seq_lower_word;
} __packed *sigblock = enc_buf;
} __packed *sigblock;
struct {
__le64 a, b, c, d;
} __packed *penc = enc_buf;
int ciphertext_len;
if (au->session_key.type == CEPH_CRYPTO_AES) {
/* no leading len, no ceph_x_encrypt_header */
sigblock = enc_buf;
} else {
BUILD_BUG_ON(SHA256_DIGEST_SIZE + sizeof(*sigblock) >
CEPHX_AU_ENC_BUF_LEN);
sigblock = enc_buf + SHA256_DIGEST_SIZE;
}
sigblock->header_crc = msg->hdr.crc;
sigblock->front_crc = msg->footer.front_crc;
@@ -1013,12 +1089,18 @@ static int calc_signature(struct ceph_x_authorizer *au, struct ceph_msg *msg,
sigblock->data_len = msg->hdr.data_len;
sigblock->seq_lower_word = *(__le32 *)&msg->hdr.seq;
/* no leading len, no ceph_x_encrypt_header */
ret = ceph_crypt(&au->session_key, true, enc_buf,
CEPHX_AU_ENC_BUF_LEN, sizeof(*sigblock),
&ciphertext_len);
if (ret)
return ret;
if (au->session_key.type == CEPH_CRYPTO_AES) {
int ciphertext_len; /* unused */
ret = ceph_crypt(&au->session_key, 0 /* dummy */,
true, enc_buf, CEPHX_AU_ENC_BUF_LEN,
sizeof(*sigblock), &ciphertext_len);
if (ret)
return ret;
} else {
ceph_hmac_sha256(&au->session_key, sigblock,
sizeof(*sigblock), enc_buf);
}
*psig = penc->a ^ penc->b ^ penc->c ^ penc->d;
}
@@ -1092,21 +1174,27 @@ int ceph_x_init(struct ceph_auth_client *ac)
int ret;
dout("ceph_x_init %p\n", ac);
ret = -ENOMEM;
xi = kzalloc(sizeof(*xi), GFP_NOFS);
if (!xi)
goto out;
return -ENOMEM;
ret = -EINVAL;
if (!ac->key) {
pr_err("no secret set (for auth_x protocol)\n");
goto out_nomem;
goto err_xi;
}
ret = ceph_crypto_key_clone(&xi->secret, ac->key);
if (ret < 0) {
pr_err("cannot clone key: %d\n", ret);
goto out_nomem;
goto err_xi;
}
ret = ceph_crypto_key_prepare(&xi->secret, client_key_usages,
ARRAY_SIZE(client_key_usages));
if (ret) {
pr_err("cannot prepare key: %d\n", ret);
goto err_secret;
}
xi->starting = true;
@@ -1117,8 +1205,9 @@ int ceph_x_init(struct ceph_auth_client *ac)
ac->ops = &ceph_x_ops;
return 0;
out_nomem:
err_secret:
ceph_crypto_key_destroy(&xi->secret);
err_xi:
kfree(xi);
out:
return ret;
}

View file

@@ -6,6 +6,44 @@
#define CEPHX_GET_PRINCIPAL_SESSION_KEY 0x0200
#define CEPHX_GET_ROTATING_KEY 0x0400
/* Client <-> AuthMonitor */
/*
* The AUTH session's connection secret: encrypted with the AUTH
* ticket session key
*/
#define CEPHX_KEY_USAGE_AUTH_CONNECTION_SECRET 0x03
/*
* The ticket's blob for the client ("blob for me", contains the
* session key): encrypted with the client's secret key in case of
* the AUTH ticket and the AUTH ticket session key in case of other
* service tickets
*/
#define CEPHX_KEY_USAGE_TICKET_SESSION_KEY 0x04
/*
* The ticket's blob for the service (ceph_x_ticket_blob): possibly
* encrypted with the old AUTH ticket session key in case of the AUTH
* ticket and not encrypted in case of other service tickets
*/
#define CEPHX_KEY_USAGE_TICKET_BLOB 0x05
/* Client <-> Service */
/*
* The client's authorization request (ceph_x_authorize_b):
* encrypted with the service ticket session key
*/
#define CEPHX_KEY_USAGE_AUTHORIZE 0x10
/*
* The service's challenge (ceph_x_authorize_challenge):
* encrypted with the service ticket session key
*/
#define CEPHX_KEY_USAGE_AUTHORIZE_CHALLENGE 0x11
/*
* The service's final reply (ceph_x_authorize_reply + the service
* session's connection secret): encrypted with the service ticket
* session key
*/
#define CEPHX_KEY_USAGE_AUTHORIZE_REPLY 0x12
/* common bits */
struct ceph_x_ticket_blob {
__u8 struct_v;

View file

@@ -7,6 +7,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/krb5.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <linux/sched/mm.h>
@@ -16,77 +17,119 @@
#include <linux/ceph/decode.h>
#include "crypto.h"
/*
* Set ->key and ->tfm. The rest of the key should be filled in before
* this function is called.
*/
static int set_secret(struct ceph_crypto_key *key, void *buf)
static int set_aes_tfm(struct ceph_crypto_key *key)
{
unsigned int noio_flag;
int ret;
key->key = NULL;
key->tfm = NULL;
noio_flag = memalloc_noio_save();
key->aes_tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(key->aes_tfm)) {
ret = PTR_ERR(key->aes_tfm);
key->aes_tfm = NULL;
return ret;
}
ret = crypto_sync_skcipher_setkey(key->aes_tfm, key->key, key->len);
if (ret)
return ret;
return 0;
}
static int set_krb5_tfms(struct ceph_crypto_key *key, const u32 *key_usages,
int key_usage_cnt)
{
struct krb5_buffer TK = { .len = key->len, .data = key->key };
unsigned int noio_flag;
int ret = 0;
int i;
if (WARN_ON_ONCE(key_usage_cnt > ARRAY_SIZE(key->krb5_tfms)))
return -EINVAL;
key->krb5_type = crypto_krb5_find_enctype(
KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192);
if (!key->krb5_type)
return -ENOPKG;
/*
* Despite crypto_krb5_prepare_encryption() taking a gfp mask,
* crypto_alloc_aead() inside of it allocates with GFP_KERNEL.
*/
noio_flag = memalloc_noio_save();
for (i = 0; i < key_usage_cnt; i++) {
key->krb5_tfms[i] = crypto_krb5_prepare_encryption(
key->krb5_type, &TK, key_usages[i],
GFP_NOIO);
if (IS_ERR(key->krb5_tfms[i])) {
ret = PTR_ERR(key->krb5_tfms[i]);
key->krb5_tfms[i] = NULL;
goto out_flag;
}
}
out_flag:
memalloc_noio_restore(noio_flag);
return ret;
}
int ceph_crypto_key_prepare(struct ceph_crypto_key *key,
const u32 *key_usages, int key_usage_cnt)
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
return 0; /* nothing to do */
case CEPH_CRYPTO_AES:
break;
return set_aes_tfm(key);
case CEPH_CRYPTO_AES256KRB5:
hmac_sha256_preparekey(&key->hmac_key, key->key, key->len);
return set_krb5_tfms(key, key_usages, key_usage_cnt);
default:
return -ENOTSUPP;
}
if (!key->len)
return -EINVAL;
key->key = kmemdup(buf, key->len, GFP_NOIO);
if (!key->key) {
ret = -ENOMEM;
goto fail;
}
/* crypto_alloc_sync_skcipher() allocates with GFP_KERNEL */
noio_flag = memalloc_noio_save();
key->tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
memalloc_noio_restore(noio_flag);
if (IS_ERR(key->tfm)) {
ret = PTR_ERR(key->tfm);
key->tfm = NULL;
goto fail;
}
ret = crypto_sync_skcipher_setkey(key->tfm, key->key, key->len);
if (ret)
goto fail;
return 0;
fail:
ceph_crypto_key_destroy(key);
return ret;
}
/*
* @dst should be zeroed before this function is called.
*/
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src)
{
memcpy(dst, src, sizeof(struct ceph_crypto_key));
return set_secret(dst, src->key);
dst->type = src->type;
dst->created = src->created;
dst->len = src->len;
dst->key = kmemdup(src->key, src->len, GFP_NOIO);
if (!dst->key)
return -ENOMEM;
return 0;
}
/*
* @key should be zeroed before this function is called.
*/
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
int ret;
ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
key->type = ceph_decode_16(p);
ceph_decode_copy(p, &key->created, sizeof(key->created));
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
ret = set_secret(key, *p);
if (key->len > CEPH_MAX_KEY_LEN) {
pr_err("secret too big %d\n", key->len);
return -EINVAL;
}
key->key = kmemdup(*p, key->len, GFP_NOIO);
if (!key->key)
return -ENOMEM;
memzero_explicit(*p, key->len);
*p += key->len;
return ret;
return 0;
bad:
dout("failed to decode crypto key\n");
@@ -122,12 +165,26 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
{
if (key) {
kfree_sensitive(key->key);
key->key = NULL;
if (key->tfm) {
crypto_free_sync_skcipher(key->tfm);
key->tfm = NULL;
int i;
if (!key)
return;
kfree_sensitive(key->key);
key->key = NULL;
if (key->type == CEPH_CRYPTO_AES) {
if (key->aes_tfm) {
crypto_free_sync_skcipher(key->aes_tfm);
key->aes_tfm = NULL;
}
} else if (key->type == CEPH_CRYPTO_AES256KRB5) {
memzero_explicit(&key->hmac_key, sizeof(key->hmac_key));
for (i = 0; i < ARRAY_SIZE(key->krb5_tfms); i++) {
if (key->krb5_tfms[i]) {
crypto_free_aead(key->krb5_tfms[i]);
key->krb5_tfms[i] = NULL;
}
}
}
}
@@ -207,7 +264,7 @@ static void teardown_sgtable(struct sg_table *sgt)
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
SYNC_SKCIPHER_REQUEST_ON_STACK(req, key->aes_tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
char iv[AES_BLOCK_SIZE] __aligned(8);
@@ -223,7 +280,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
return ret;
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_sync_tfm(req, key->tfm);
skcipher_request_set_sync_tfm(req, key->aes_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
@@ -268,7 +325,68 @@ out_sgt:
return ret;
}
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
static int ceph_krb5_encrypt(const struct ceph_crypto_key *key, int usage_slot,
void *buf, int buf_len, int in_len, int *pout_len)
{
struct sg_table sgt;
struct scatterlist prealloc_sg;
int ret;
if (WARN_ON_ONCE(usage_slot >= ARRAY_SIZE(key->krb5_tfms)))
return -EINVAL;
ret = setup_sgtable(&sgt, &prealloc_sg, buf, buf_len);
if (ret)
return ret;
ret = crypto_krb5_encrypt(key->krb5_type, key->krb5_tfms[usage_slot],
sgt.sgl, sgt.nents, buf_len, AES_BLOCK_SIZE,
in_len, false);
if (ret < 0) {
pr_err("%s encrypt failed: %d\n", __func__, ret);
goto out_sgt;
}
*pout_len = ret;
ret = 0;
out_sgt:
teardown_sgtable(&sgt);
return ret;
}
static int ceph_krb5_decrypt(const struct ceph_crypto_key *key, int usage_slot,
void *buf, int buf_len, int in_len, int *pout_len)
{
struct sg_table sgt;
struct scatterlist prealloc_sg;
size_t data_off = 0;
size_t data_len = in_len;
int ret;
if (WARN_ON_ONCE(usage_slot >= ARRAY_SIZE(key->krb5_tfms)))
return -EINVAL;
ret = setup_sgtable(&sgt, &prealloc_sg, buf, in_len);
if (ret)
return ret;
ret = crypto_krb5_decrypt(key->krb5_type, key->krb5_tfms[usage_slot],
sgt.sgl, sgt.nents, &data_off, &data_len);
if (ret) {
pr_err("%s decrypt failed: %d\n", __func__, ret);
goto out_sgt;
}
WARN_ON(data_off != AES_BLOCK_SIZE);
*pout_len = data_len;
out_sgt:
teardown_sgtable(&sgt);
return ret;
}
int ceph_crypt(const struct ceph_crypto_key *key, int usage_slot, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
switch (key->type) {
@@ -278,11 +396,64 @@ int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
case CEPH_CRYPTO_AES:
return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
pout_len);
case CEPH_CRYPTO_AES256KRB5:
return encrypt ?
ceph_krb5_encrypt(key, usage_slot, buf, buf_len, in_len,
pout_len) :
ceph_krb5_decrypt(key, usage_slot, buf, buf_len, in_len,
pout_len);
default:
return -ENOTSUPP;
}
}
int ceph_crypt_data_offset(const struct ceph_crypto_key *key)
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
case CEPH_CRYPTO_AES:
return 0;
case CEPH_CRYPTO_AES256KRB5:
/* confounder */
return AES_BLOCK_SIZE;
default:
BUG();
}
}
int ceph_crypt_buflen(const struct ceph_crypto_key *key, int data_len)
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
return data_len;
case CEPH_CRYPTO_AES:
/* PKCS#7 padding at the end */
return data_len + AES_BLOCK_SIZE -
(data_len & (AES_BLOCK_SIZE - 1));
case CEPH_CRYPTO_AES256KRB5:
/* confounder at the beginning and 192-bit HMAC at the end */
return AES_BLOCK_SIZE + data_len + 24;
default:
BUG();
}
}
void ceph_hmac_sha256(const struct ceph_crypto_key *key, const void *buf,
int buf_len, u8 hmac[SHA256_DIGEST_SIZE])
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
case CEPH_CRYPTO_AES:
memset(hmac, 0, SHA256_DIGEST_SIZE);
return;
case CEPH_CRYPTO_AES256KRB5:
hmac_sha256(&key->hmac_key, buf, buf_len, hmac);
return;
default:
BUG();
}
}
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
@@ -295,7 +466,7 @@ static int ceph_key_preparse(struct key_preparsed_payload *prep)
goto err;
ret = -ENOMEM;
ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
ckey = kzalloc(sizeof(*ckey), GFP_KERNEL);
if (!ckey)
goto err;

View file

@@ -2,10 +2,11 @@
#ifndef _FS_CEPH_CRYPTO_H
#define _FS_CEPH_CRYPTO_H
#include <crypto/sha2.h>
#include <linux/ceph/types.h>
#include <linux/ceph/buffer.h>
#define CEPH_KEY_LEN 16
#define CEPH_MAX_KEY_LEN 32
#define CEPH_MAX_CON_SECRET_LEN 64
/*
@@ -16,9 +17,19 @@ struct ceph_crypto_key {
struct ceph_timespec created;
int len;
void *key;
struct crypto_sync_skcipher *tfm;
union {
struct crypto_sync_skcipher *aes_tfm;
struct {
struct hmac_sha256_key hmac_key;
const struct krb5_enctype *krb5_type;
struct crypto_aead *krb5_tfms[3];
};
};
};
int ceph_crypto_key_prepare(struct ceph_crypto_key *key,
const u32 *key_usages, int key_usage_cnt);
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src);
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
@@ -26,8 +37,12 @@ int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
void ceph_crypto_key_destroy(struct ceph_crypto_key *key);
/* crypto.c */
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
int ceph_crypt(const struct ceph_crypto_key *key, int usage_slot, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len);
int ceph_crypt_data_offset(const struct ceph_crypto_key *key);
int ceph_crypt_buflen(const struct ceph_crypto_key *key, int data_len);
void ceph_hmac_sha256(const struct ceph_crypto_key *key, const void *buf,
int buf_len, u8 hmac[SHA256_DIGEST_SIZE]);
int ceph_crypto_init(void);
void ceph_crypto_shutdown(void);

View file

@@ -779,9 +779,9 @@ static int setup_crypto(struct ceph_connection *con,
return 0; /* auth_x, secure mode */
}
static void ceph_hmac_sha256(struct ceph_connection *con,
const struct kvec *kvecs, int kvec_cnt,
u8 hmac[SHA256_DIGEST_SIZE])
static void con_hmac_sha256(struct ceph_connection *con,
const struct kvec *kvecs, int kvec_cnt,
u8 hmac[SHA256_DIGEST_SIZE])
{
struct hmac_sha256_ctx ctx;
int i;
@@ -1438,8 +1438,8 @@ static int prepare_auth_signature(struct ceph_connection *con)
if (!buf)
return -ENOMEM;
ceph_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
CTRL_BODY(buf));
con_hmac_sha256(con, con->v2.in_sign_kvecs, con->v2.in_sign_kvec_cnt,
CTRL_BODY(buf));
return prepare_control(con, FRAME_TAG_AUTH_SIGNATURE, buf,
SHA256_DIGEST_SIZE);
@@ -2360,7 +2360,7 @@
*/
static int process_auth_done(struct ceph_connection *con, void *p, void *end)
{
u8 session_key_buf[CEPH_KEY_LEN + 16];
u8 session_key_buf[CEPH_MAX_KEY_LEN + 16];
u8 con_secret_buf[CEPH_MAX_CON_SECRET_LEN + 16];
u8 *session_key = PTR_ALIGN(&session_key_buf[0], 16);
u8 *con_secret = PTR_ALIGN(&con_secret_buf[0], 16);
@@ -2436,8 +2436,8 @@ static int process_auth_signature(struct ceph_connection *con,
return -EINVAL;
}
ceph_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
hmac);
con_hmac_sha256(con, con->v2.out_sign_kvecs, con->v2.out_sign_kvec_cnt,
hmac);
ceph_decode_need(&p, end, SHA256_DIGEST_SIZE, bad);
if (crypto_memneq(p, hmac, SHA256_DIGEST_SIZE)) {