bcachefs-tools/libbcachefs/io_types.h

#ifndef _BCACHE_IO_TYPES_H
#define _BCACHE_IO_TYPES_H

#include "btree_types.h"
#include "buckets_types.h"
#include "keylist_types.h"

#include <linux/llist.h>
#include <linux/workqueue.h>

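/*
 * A read in flight: wraps the struct bio embedded at the end. A read may be
 * split into multiple bch_read_bios, which point back at their parent:
 */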
struct bch_read_bio {
	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
		struct bch_read_bio	*parent;
		bio_end_io_t		*orig_bi_end_io;
	};

	/*
	 * Saved copy of parent->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	parent_iter;

	unsigned		submit_time_us;
	u16			flags;
	u8			bounce:1,
				split:1;

	struct bch_fs		*c;
	struct bch_dev		*ca;

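	/* extent pointer being read, and its checksum/compression info: */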
	struct bch_extent_ptr	ptr;
	struct bch_extent_crc128 crc;
	struct bversion		version;

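	/* promote op, if this read is also promoting data to the cache: */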
	struct cache_promote_op	*promote;

	/*
	 * If we have to retry the read (IO error, checksum failure, read stale
	 * data (raced with allocator)), we retry the portion of the parent bio
	 * that failed (i.e. this bio's portion, parent_iter).
	 *
	 * But we need to stash the inode somewhere:
	 */
	u64			inode;

	/* for punting retries etc. to process context: */
	struct work_struct	work;

	struct bio		bio;
};
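
/* the rbio this one was split from, if it was split; otherwise @rbio itself: */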
static inline struct bch_read_bio *
bch2_rbio_parent(struct bch_read_bio *rbio)
{
	return rbio->split ? rbio->parent : rbio;
}
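
/*
 * A write in flight to a single device: wraps the struct bio embedded at the
 * end. Split/cloned write bios point back at the bio they came from via
 * @parent; a top level wbio completes @cl instead (see @split).
 */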
struct bch_write_bio {
	struct bch_fs		*c;
	struct bch_dev		*ca;

	union {
		struct bch_write_bio	*parent;
		struct closure		*cl;
	};

	u8			ptr_idx;
	u8			replicas_failed;
	u8			order;

	unsigned		split:1,
				bounce:1,	/* using a bounce buffer */
				put_bio:1,	/* bio_put() on completion */
				have_io_ref:1,	/* holds an io ref on @ca */
				used_mempool:1;	/* bounce buffer came from a mempool */

	unsigned		submit_time_us;
	void			*data;		/* bounce buffer, if we allocated one */

	struct bio		bio;
};
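
/*
 * State for one logical write: the disk reservation, bucket allocation, the
 * data write(s) issued via the embedded @wbio, and the index update that
 * makes the new extents visible.
 */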
struct bch_write_op {
	struct closure		cl;

	struct bch_fs		*c;
	struct workqueue_struct	*io_wq;

	unsigned		written; /* sectors */
	short			error;

	u16			flags;
	unsigned		csum_type:4;
	unsigned		compression_type:4;
	unsigned		nr_replicas:4;
	unsigned		alloc_reserve:4;
	unsigned		nonce:14;

	struct bpos		pos;
	struct bversion		version;

	/* For BCH_WRITE_DATA_COMPRESSED: */
	struct bch_extent_crc128 crc;
	unsigned		size;

	/* disk space reserved for this write: */
	struct disk_reservation	res;

	/* write point we're allocating from: */
	struct write_point	*wp;

	union {
		u8			open_buckets[16];
		struct {
			struct bch_write_op	*next;
			unsigned long		expires;
		};
	};

	/*
	 * If caller wants to flush but hasn't passed us a journal_seq ptr, we
	 * still need to stash the journal_seq somewhere:
	 */
	union {
		u64			*journal_seq_p;
		u64			journal_seq;
	};

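	/* does the btree (index) update once the data write has completed: */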
	int			(*index_update_fn)(struct bch_write_op *);

	/* keys to insert into the index, with inline storage for small writes: */
	struct keylist		insert_keys;
	u64			inline_keys[BKEY_EXTENT_U64s_MAX * 2];

	/* Must be last: */
	struct bch_write_bio	wbio;
};

#endif /* _BCACHE_IO_TYPES_H */