mirror of https://github.com/Qortal/Brooklyn.git synced 2025-02-11 09:45:55 +00:00

mega update for sean's poose release

This commit is contained in:
balloonatic 2022-12-14 11:30:32 +05:00
parent b59e9362c3
commit 39b93551ac
9939 changed files with 647569 additions and 160369 deletions

View File

@ -26,6 +26,7 @@
#include <linux/namei.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
+#include <linux/stat.h>
#include "../fs/internal.h"
#include "blk.h"
@ -1069,3 +1070,25 @@ void sync_bdevs(bool wait)
spin_unlock(&blockdev_superblock->s_inode_list_lock);
iput(old_inode);
}
/*
* Handle STATX_DIOALIGN for block devices.
*
* Note that the inode passed to this is the inode of a block device node file,
* not the block device's internal inode. Therefore it is *not* valid to use
* I_BDEV() here; the block device has to be looked up by i_rdev instead.
*/
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
struct block_device *bdev;
bdev = blkdev_get_no_open(inode->i_rdev);
if (!bdev)
return;
stat->dio_mem_align = bdev_dma_alignment(bdev) + 1;
stat->dio_offset_align = bdev_logical_block_size(bdev);
stat->result_mask |= STATX_DIOALIGN;
blkdev_put_no_open(bdev);
}
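[illustration, not part of the diff] A minimal userspace sketch of how the new fields surface through statx(2), assuming a kernel and libc that already expose STATX_DIOALIGN; the path /dev/sda is a placeholder:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	/* ask only for the direct-I/O alignment info added by this patch */
	if (statx(AT_FDCWD, "/dev/sda", 0, STATX_DIOALIGN, &stx) != 0) {
		perror("statx");
		return 1;
	}
	if (stx.stx_mask & STATX_DIOALIGN)
		printf("dio mem align %u, dio offset align %u\n",
		       stx.stx_dio_mem_align, stx.stx_dio_offset_align);
	else
		printf("STATX_DIOALIGN not reported for this file\n");
	return 0;
}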

View File

@ -254,17 +254,12 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
#else /* CONFIG_BFQ_CGROUP_DEBUG */
-void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-blk_opf_t opf) { }
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
-void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { }
-void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }
-void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { }
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
@ -615,6 +610,10 @@ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
struct bfq_group *bfqg;
while (blkg) {
+if (!blkg->online) {
+blkg = blkg->parent;
+continue;
+}
bfqg = blkg_to_bfqg(blkg);
if (bfqg->online) {
bio_associate_blkg_from_css(bio, &blkg->blkcg->css);

View File

@ -1925,7 +1925,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
bfqq->service_from_backlogged = 0;
bfq_clear_bfqq_softrt_update(bfqq);
-bfq_add_bfqq_busy(bfqd, bfqq);
+bfq_add_bfqq_busy(bfqq);
/*
* Expire in-service queue if preemption may be needed for
@ -2419,7 +2419,7 @@ static void bfq_remove_request(struct request_queue *q,
bfqq->next_rq = NULL;
if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) {
-bfq_del_bfqq_busy(bfqd, bfqq, false);
+bfq_del_bfqq_busy(bfqq, false);
/*
* bfqq emptied. In normal operation, when
* bfqq is empty, bfqq->entity.service and
@ -3098,7 +3098,7 @@ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq)
*/
if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) &&
bfqq != bfqd->in_service_queue)
-bfq_del_bfqq_busy(bfqd, bfqq, false);
+bfq_del_bfqq_busy(bfqq, false);
bfq_reassign_last_bfqq(bfqq, NULL);
@ -3908,7 +3908,7 @@ static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
*/
bfqq->budget_timeout = jiffies;
-bfq_del_bfqq_busy(bfqd, bfqq, true);
+bfq_del_bfqq_busy(bfqq, true);
} else {
bfq_requeue_bfqq(bfqd, bfqq, true);
/*
@ -5255,9 +5255,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
struct hlist_node *n;
struct bfq_group *bfqg = bfqq_group(bfqq);
-if (bfqq->bfqd)
-bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d",
-bfqq, bfqq->ref);
+bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
bfqq->ref--;
if (bfqq->ref)
@ -5321,7 +5319,7 @@ void bfq_put_queue(struct bfq_queue *bfqq)
hlist_del_init(&item->woken_list_node);
}
-if (bfqq->bfqd && bfqq->bfqd->last_completed_rq_bfqq == bfqq)
+if (bfqq->bfqd->last_completed_rq_bfqq == bfqq)
bfqq->bfqd->last_completed_rq_bfqq = NULL;
kmem_cache_free(bfq_pool, bfqq);

View File

@ -369,12 +369,8 @@ struct bfq_queue {
unsigned long split_time; /* time of last split */
unsigned long first_IO_time; /* time of first I/O for this queue */
unsigned long creation_time; /* when this queue is created */
-/* max service rate measured so far */
-u32 max_service_rate;
/*
* Pointer to the waker queue for this queue, i.e., to the
* queue Q such that this queue happens to get new I/O right
@ -993,20 +989,23 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
/* ---------------- cgroups-support interface ---------------- */
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq);
-void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
-blk_opf_t opf);
void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf);
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
u64 io_start_time_ns, blk_opf_t opf);
void bfqg_stats_update_dequeue(struct bfq_group *bfqg);
-void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
-void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg);
-void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
struct bfq_group *bfqg);
+#ifdef CONFIG_BFQ_CGROUP_DEBUG
+void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
+blk_opf_t opf);
+void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg);
+void bfqg_stats_update_idle_time(struct bfq_group *bfqg);
+void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg);
+#endif
void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
void bfq_end_wr_async(struct bfq_data *bfqd);
@ -1077,9 +1076,8 @@ void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq);
void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bool expiration);
-void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-bool expiration);
-void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration);
+void bfq_add_bfqq_busy(struct bfq_queue *bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */

View File

@ -1651,9 +1651,10 @@ void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
* the service tree. As a special case, it can be invoked during an
* expiration.
*/
-void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
-bool expiration)
+void bfq_del_bfqq_busy(struct bfq_queue *bfqq, bool expiration)
{
+struct bfq_data *bfqd = bfqq->bfqd;
bfq_log_bfqq(bfqd, bfqq, "del from busy");
bfq_clear_bfqq_busy(bfqq);
@ -1674,8 +1675,10 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
/*
* Called when an inactive queue receives a new request.
*/
-void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+void bfq_add_bfqq_busy(struct bfq_queue *bfqq)
{
+struct bfq_data *bfqd = bfqq->bfqd;
bfq_log_bfqq(bfqd, bfqq, "add to busy");
bfq_activate_bfqq(bfqd, bfqq);

View File

@ -567,7 +567,7 @@ EXPORT_SYMBOL(bio_alloc_bioset);
* be reused by calling bio_uninit() before calling bio_init() again.
*
* Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this
-* function are not backed by a mempool can can fail. Do not use this function
+* function are not backed by a mempool can fail. Do not use this function
* for allocations in the file system I/O path.
*
* Returns: Pointer to new bio on success, NULL on failure.
@ -741,7 +741,7 @@ void bio_put(struct bio *bio)
return;
}
-if (bio->bi_opf & REQ_ALLOC_CACHE) {
+if ((bio->bi_opf & REQ_ALLOC_CACHE) && !WARN_ON_ONCE(in_interrupt())) {
struct bio_alloc_cache *cache;
bio_uninit(bio);
@ -760,8 +760,6 @@ EXPORT_SYMBOL(bio_put);
static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
{
bio_set_flag(bio, BIO_CLONED);
-if (bio_flagged(bio_src, BIO_THROTTLED))
-bio_set_flag(bio, BIO_THROTTLED);
bio->bi_ioprio = bio_src->bi_ioprio;
bio->bi_iter = bio_src->bi_iter;
@ -869,6 +867,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
*same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
if (*same_page)
return true;
+else if (IS_ENABLED(CONFIG_KMSAN))
+return false;
return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
}
@ -1065,9 +1065,6 @@ void __bio_add_page(struct bio *bio, struct page *page,
bio->bi_iter.bi_size += len;
bio->bi_vcnt++;
-if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page)))
-bio_set_flag(bio, BIO_WORKINGSET);
}
EXPORT_SYMBOL_GPL(__bio_add_page);
@ -1276,9 +1273,6 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
* fit into the bio, or are requested in @iter, whatever is smaller. If
* MM encounters an error pinning the requested pages, it stops. Error
* is returned only if 0 pages could be pinned.
-*
-* It's intended for direct IO, so doesn't do PSI tracking, the caller is
-* responsible for setting BIO_WORKINGSET if necessary.
*/
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
@ -1294,8 +1288,6 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
ret = __bio_iov_iter_get_pages(bio, iter);
} while (!ret && iov_iter_count(iter) && !bio_full(bio, 0));
-/* don't account direct I/O as memory stall */
-bio_clear_flag(bio, BIO_WORKINGSET);
return bio->bi_vcnt ? 0 : ret;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
@ -1754,7 +1746,8 @@ static int __init init_bio(void)
cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL,
bio_cpu_dead);
-if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS))
+if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0,
+BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE))
panic("bio: can't allocate bios\n");
if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE))
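[illustration, not part of the diff] With fs_bio_set now created with BIOSET_PERCPU_CACHE, a hedged sketch of what the REQ_ALLOC_CACHE path enables for a kernel caller; bdev and the read opcode below are placeholders, not code from the patch:

struct bio *bio;

bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_ALLOC_CACHE,
		       GFP_KERNEL, &fs_bio_set);
if (bio) {
	/* ... fill in sector, pages and bi_end_io, then submit ... */
	bio_put(bio);	/* may recycle the bio into the per-cpu cache */
}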

View File

@ -19,8 +19,8 @@ int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
return -EINVAL;
cgrp = cgroup_get_from_id(cgrp_id);
-if (!cgrp)
-return -ENOENT;
+if (IS_ERR(cgrp))
+return PTR_ERR(cgrp);
css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
if (!css) {
ret = -ENOENT;

View File

@ -202,19 +202,19 @@ static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
/**
* blkg_alloc - allocate a blkg
* @blkcg: block cgroup the new blkg is associated with
-* @q: request_queue the new blkg is associated with
+* @disk: gendisk the new blkg is associated with
* @gfp_mask: allocation mask to use
*
* Allocate a new blkg assocating @blkcg and @q.
*/
-static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
+static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct gendisk *disk,
gfp_t gfp_mask)
{
struct blkcg_gq *blkg;
int i, cpu;
/* alloc and init base part */
-blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
+blkg = kzalloc_node(sizeof(*blkg), gfp_mask, disk->queue->node);
if (!blkg)
return NULL;
@ -225,10 +225,10 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
if (!blkg->iostat_cpu)
goto err_free;
-if (!blk_get_queue(q))
+if (!blk_get_queue(disk->queue))
goto err_free;
-blkg->q = q;
+blkg->q = disk->queue;
INIT_LIST_HEAD(&blkg->q_node);
spin_lock_init(&blkg->async_bio_lock);
bio_list_init(&blkg->async_bios);
@ -243,11 +243,11 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
struct blkcg_policy *pol = blkcg_policy[i];
struct blkg_policy_data *pd;
-if (!blkcg_policy_enabled(q, pol))
+if (!blkcg_policy_enabled(disk->queue, pol))
continue;
/* alloc per-policy data and attach it to blkg */
-pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
+pd = pol->pd_alloc_fn(gfp_mask, disk->queue, blkcg);
if (!pd)
goto err_free;
@ -263,45 +263,20 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
return NULL; return NULL;
} }
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
struct request_queue *q, bool update_hint)
{
struct blkcg_gq *blkg;
/*
* Hint didn't match. Look up from the radix tree. Note that the
* hint can only be updated under queue_lock as otherwise @blkg
* could have already been removed from blkg_tree. The caller is
* responsible for grabbing queue_lock if @update_hint.
*/
blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
if (blkg && blkg->q == q) {
if (update_hint) {
lockdep_assert_held(&q->queue_lock);
rcu_assign_pointer(blkcg->blkg_hint, blkg);
}
return blkg;
}
return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);
/*
* If @new_blkg is %NULL, this function tries to allocate a new one as
* necessary using %GFP_NOWAIT. @new_blkg is always consumed on return.
*/
-static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
-struct request_queue *q,
+static struct blkcg_gq *blkg_create(struct blkcg *blkcg, struct gendisk *disk,
struct blkcg_gq *new_blkg)
{
struct blkcg_gq *blkg;
int i, ret;
-lockdep_assert_held(&q->queue_lock);
+lockdep_assert_held(&disk->queue->queue_lock);
/* request_queue is dying, do not create/recreate a blkg */
-if (blk_queue_dying(q)) {
+if (blk_queue_dying(disk->queue)) {
ret = -ENODEV;
goto err_free_blkg;
}
@ -314,7 +289,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* allocate */
if (!new_blkg) {
-new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
+new_blkg = blkg_alloc(blkcg, disk, GFP_NOWAIT | __GFP_NOWARN);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto err_put_css;
@ -324,7 +299,7 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* link parent */
if (blkcg_parent(blkcg)) {
-blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
+blkg->parent = blkg_lookup(blkcg_parent(blkcg), disk->queue);
if (WARN_ON_ONCE(!blkg->parent)) {
ret = -ENODEV;
goto err_put_css;
@ -342,10 +317,10 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/* insert */
spin_lock(&blkcg->lock);
-ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
+ret = radix_tree_insert(&blkcg->blkg_tree, disk->queue->id, blkg);
if (likely(!ret)) {
hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
-list_add(&blkg->q_node, &q->blkg_list);
+list_add(&blkg->q_node, &disk->queue->blkg_list);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@ -374,19 +349,20 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
/**
* blkg_lookup_create - lookup blkg, try to create one if not there
* @blkcg: blkcg of interest
-* @q: request_queue of interest
+* @disk: gendisk of interest
*
-* Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
+* Lookup blkg for the @blkcg - @disk pair. If it doesn't exist, try to
* create one. blkg creation is performed recursively from blkcg_root such
* that all non-root blkg's have access to the parent blkg. This function
-* should be called under RCU read lock and takes @q->queue_lock.
+* should be called under RCU read lock and takes @disk->queue->queue_lock.
*
* Returns the blkg or the closest blkg if blkg_create() fails as it walks
* down from root.
*/
static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-struct request_queue *q)
+struct gendisk *disk)
{
+struct request_queue *q = disk->queue;
struct blkcg_gq *blkg;
unsigned long flags;
@ -397,9 +373,13 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
return blkg;
spin_lock_irqsave(&q->queue_lock, flags);
-blkg = __blkg_lookup(blkcg, q, true);
-if (blkg)
+blkg = blkg_lookup(blkcg, q);
+if (blkg) {
+if (blkcg != &blkcg_root &&
+blkg != rcu_dereference(blkcg->blkg_hint))
+rcu_assign_pointer(blkcg->blkg_hint, blkg);
goto found;
+}
/*
* Create blkgs walking down from blkcg_root to @blkcg, so that all
@ -412,7 +392,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct blkcg_gq *ret_blkg = q->root_blkg;
while (parent) {
-blkg = __blkg_lookup(parent, q, false);
+blkg = blkg_lookup(parent, q);
if (blkg) {
/* remember closest blkg */
ret_blkg = blkg;
@ -422,7 +402,7 @@ static struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
parent = blkcg_parent(parent);
}
-blkg = blkg_create(pos, q, NULL);
+blkg = blkg_create(pos, disk, NULL);
if (IS_ERR(blkg)) {
blkg = ret_blkg;
break;
@ -476,14 +456,9 @@ static void blkg_destroy(struct blkcg_gq *blkg)
percpu_ref_kill(&blkg->refcnt);
}
-/**
-* blkg_destroy_all - destroy all blkgs associated with a request_queue
-* @q: request_queue of interest
-*
-* Destroy all blkgs associated with @q.
-*/
-static void blkg_destroy_all(struct request_queue *q)
+static void blkg_destroy_all(struct gendisk *disk)
{
+struct request_queue *q = disk->queue;
struct blkcg_gq *blkg, *n;
int count = BLKG_DESTROY_BATCH_SIZE;
@ -616,19 +591,6 @@ u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
} }
EXPORT_SYMBOL_GPL(__blkg_prfill_u64); EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
/* Performs queue bypass and policy enabled checks then looks up blkg. */
static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
const struct blkcg_policy *pol,
struct request_queue *q)
{
WARN_ON_ONCE(!rcu_read_lock_held());
lockdep_assert_held(&q->queue_lock);
if (!blkcg_policy_enabled(q, pol))
return ERR_PTR(-EOPNOTSUPP);
return __blkg_lookup(blkcg, q, true /* update_hint */);
}
/** /**
* blkcg_conf_open_bdev - parse and open bdev for per-blkg config update * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
* @inputp: input string pointer * @inputp: input string pointer
@ -684,6 +646,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
__acquires(rcu) __acquires(&bdev->bd_queue->queue_lock)
{
struct block_device *bdev;
+struct gendisk *disk;
struct request_queue *q;
struct blkcg_gq *blkg;
int ret;
@ -691,8 +654,8 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
bdev = blkcg_conf_open_bdev(&input);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
-q = bdev_get_queue(bdev);
+disk = bdev->bd_disk;
+q = disk->queue;
/*
* blkcg_deactivate_policy() requires queue to be frozen, we can grab
@ -705,12 +668,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
rcu_read_lock();
spin_lock_irq(&q->queue_lock);
-blkg = blkg_lookup_check(blkcg, pol, q);
-if (IS_ERR(blkg)) {
-ret = PTR_ERR(blkg);
+if (!blkcg_policy_enabled(q, pol)) {
+ret = -EOPNOTSUPP;
goto fail_unlock;
}
+blkg = blkg_lookup(blkcg, q);
if (blkg)
goto success;
@ -724,7 +687,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
struct blkcg_gq *new_blkg;
parent = blkcg_parent(blkcg);
-while (parent && !__blkg_lookup(parent, q, false)) {
+while (parent && !blkg_lookup(parent, q)) {
pos = parent;
parent = blkcg_parent(parent);
}
@ -733,7 +696,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
spin_unlock_irq(&q->queue_lock);
rcu_read_unlock();
-new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
+new_blkg = blkg_alloc(pos, disk, GFP_KERNEL);
if (unlikely(!new_blkg)) {
ret = -ENOMEM;
goto fail_exit_queue;
@ -748,17 +711,17 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
rcu_read_lock();
spin_lock_irq(&q->queue_lock);
-blkg = blkg_lookup_check(pos, pol, q);
-if (IS_ERR(blkg)) {
-ret = PTR_ERR(blkg);
+if (!blkcg_policy_enabled(q, pol)) {
blkg_free(new_blkg);
+ret = -EOPNOTSUPP;
goto fail_preloaded;
}
+blkg = blkg_lookup(pos, q);
if (blkg) {
blkg_free(new_blkg);
} else {
-blkg = blkg_create(pos, q, new_blkg);
+blkg = blkg_create(pos, disk, new_blkg);
if (IS_ERR(blkg)) {
ret = PTR_ERR(blkg);
goto fail_preloaded;
@ -915,8 +878,7 @@ static void blkcg_fill_root_iostats(void)
class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
while ((dev = class_dev_iter_next(&iter))) {
struct block_device *bdev = dev_to_bdev(dev);
-struct blkcg_gq *blkg =
-blk_queue_root_blkg(bdev_get_queue(bdev));
+struct blkcg_gq *blkg = bdev->bd_disk->queue->root_blkg;
struct blkg_iostat tmp;
int cpu;
unsigned long flags;
@ -1251,29 +1213,20 @@ static int blkcg_css_online(struct cgroup_subsys_state *css)
* parent so that offline always happens towards the root.
*/
if (parent)
-blkcg_pin_online(css);
+blkcg_pin_online(&parent->css);
return 0;
}
-/**
-* blkcg_init_queue - initialize blkcg part of request queue
-* @q: request_queue to initialize
-*
-* Called from blk_alloc_queue(). Responsible for initializing blkcg
-* part of new request_queue @q.
-*
-* RETURNS:
-* 0 on success, -errno on failure.
-*/
-int blkcg_init_queue(struct request_queue *q)
+int blkcg_init_disk(struct gendisk *disk)
{
+struct request_queue *q = disk->queue;
struct blkcg_gq *new_blkg, *blkg;
bool preloaded;
int ret;
INIT_LIST_HEAD(&q->blkg_list);
-new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
if (!new_blkg)
return -ENOMEM;
@ -1282,7 +1235,7 @@ int blkcg_init_queue(struct request_queue *q)
/* Make sure the root blkg exists. */
/* spin_lock_irq can serve as RCU read-side critical section. */
spin_lock_irq(&q->queue_lock);
-blkg = blkg_create(&blkcg_root, q, new_blkg);
+blkg = blkg_create(&blkcg_root, disk, new_blkg);
if (IS_ERR(blkg))
goto err_unlock;
q->root_blkg = blkg;
@ -1291,25 +1244,26 @@ int blkcg_init_queue(struct request_queue *q)
if (preloaded)
radix_tree_preload_end();
-ret = blk_ioprio_init(q);
+ret = blk_ioprio_init(disk);
if (ret)
goto err_destroy_all;
-ret = blk_throtl_init(q);
+ret = blk_throtl_init(disk);
if (ret)
-goto err_destroy_all;
+goto err_ioprio_exit;
-ret = blk_iolatency_init(q);
-if (ret) {
-blk_throtl_exit(q);
-blk_ioprio_exit(q);
-goto err_destroy_all;
-}
+ret = blk_iolatency_init(disk);
+if (ret)
+goto err_throtl_exit;
return 0;
+err_throtl_exit:
+blk_throtl_exit(disk);
+err_ioprio_exit:
+blk_ioprio_exit(disk);
err_destroy_all:
-blkg_destroy_all(q);
+blkg_destroy_all(disk);
return ret;
err_unlock:
spin_unlock_irq(&q->queue_lock);
@ -1318,16 +1272,10 @@ int blkcg_init_queue(struct request_queue *q)
return PTR_ERR(blkg);
}
-/**
-* blkcg_exit_queue - exit and release blkcg part of request_queue
-* @q: request_queue being released
-*
-* Called from blk_exit_queue(). Responsible for exiting blkcg part.
-*/
-void blkcg_exit_queue(struct request_queue *q)
+void blkcg_exit_disk(struct gendisk *disk)
{
-blkg_destroy_all(q);
-blk_throtl_exit(q);
+blkg_destroy_all(disk);
+blk_throtl_exit(disk);
}
static void blkcg_bind(struct cgroup_subsys_state *root_css)
@ -1836,13 +1784,13 @@ void blkcg_maybe_throttle_current(void)
/**
* blkcg_schedule_throttle - this task needs to check for throttling
-* @q: the request queue IO was submitted on
+* @gendisk: disk to throttle
* @use_memdelay: do we charge this to memory delay for PSI
*
* This is called by the IO controller when we know there's delay accumulated
* for the blkg for this task. We do not pass the blkg because there are places
* we call this that may not have that information, the swapping code for
-* instance will only have a request_queue at that point. This set's the
+* instance will only have a block_device at that point. This set's the
* notify_resume for the task to check and see if it requires throttling before
* returning to user space.
*
@ -1851,8 +1799,10 @@ void blkcg_maybe_throttle_current(void)
* throttle once. If the task needs to be throttled again it'll need to be
* re-set at the next time we see the task.
*/
-void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
+void blkcg_schedule_throttle(struct gendisk *disk, bool use_memdelay)
{
+struct request_queue *q = disk->queue;
if (unlikely(current->flags & PF_KTHREAD))
return;
@ -1902,8 +1852,7 @@ static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,
struct blkcg_gq *blkg, *ret_blkg = NULL;
rcu_read_lock();
-blkg = blkg_lookup_create(css_to_blkcg(css),
-bdev_get_queue(bio->bi_bdev));
+blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_bdev->bd_disk);
while (blkg) {
if (blkg_tryget(blkg)) {
ret_blkg = blkg;

View File

@ -178,10 +178,8 @@ struct blkcg_policy {
extern struct blkcg blkcg_root;
extern bool blkcg_debug_stats;
-struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
-struct request_queue *q, bool update_hint);
-int blkcg_init_queue(struct request_queue *q);
-void blkcg_exit_queue(struct request_queue *q);
+int blkcg_init_disk(struct gendisk *disk);
+void blkcg_exit_disk(struct gendisk *disk);
/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
@ -227,22 +225,21 @@ static inline bool bio_issue_as_root_blkg(struct bio *bio)
}
/**
-* __blkg_lookup - internal version of blkg_lookup()
+* blkg_lookup - lookup blkg for the specified blkcg - q pair
* @blkcg: blkcg of interest
* @q: request_queue of interest
-* @update_hint: whether to update lookup hint with the result or not
*
-* This is internal version and shouldn't be used by policy
-* implementations. Looks up blkgs for the @blkcg - @q pair regardless of
-* @q's bypass state. If @update_hint is %true, the caller should be
-* holding @q->queue_lock and lookup hint is updated on success.
+* Lookup blkg for the @blkcg - @q pair.
+*
+* Must be called in a RCU critical section.
*/
-static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
-struct request_queue *q,
-bool update_hint)
+static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
+struct request_queue *q)
{
struct blkcg_gq *blkg;
+WARN_ON_ONCE(!rcu_read_lock_held());
if (blkcg == &blkcg_root)
return q->root_blkg;
@ -250,33 +247,10 @@ static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
if (blkg && blkg->q == q)
return blkg;
-return blkg_lookup_slowpath(blkcg, q, update_hint);
-}
-/**
-* blkg_lookup - lookup blkg for the specified blkcg - q pair
-* @blkcg: blkcg of interest
-* @q: request_queue of interest
-*
-* Lookup blkg for the @blkcg - @q pair. This function should be called
-* under RCU read lock.
-*/
-static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
-struct request_queue *q)
-{
-WARN_ON_ONCE(!rcu_read_lock_held());
-return __blkg_lookup(blkcg, q, false);
-}
-/**
-* blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
-* @q: request_queue of interest
-*
-* Lookup blkg for @q at the root level. See also blkg_lookup().
-*/
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{
-return q->root_blkg;
+blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
+if (blkg && blkg->q != q)
+blkg = NULL;
+return blkg;
}
/** /**
@ -373,8 +347,8 @@ static inline void blkg_put(struct blkcg_gq *blkg)
*/
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
-if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
-(p_blkg)->q, false)))
+if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
+(p_blkg)->q)))
/**
* blkg_for_each_descendant_post - post-order walk of a blkg's descendants * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
@ -388,8 +362,8 @@ static inline void blkg_put(struct blkcg_gq *blkg)
*/
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
-if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
-(p_blkg)->q, false)))
+if (((d_blkg) = blkg_lookup(css_to_blkcg(pos_css), \
+(p_blkg)->q)))
bool __blkcg_punt_bio_submit(struct bio *bio);
@ -507,10 +481,8 @@ struct blkcg {
};
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
-static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
-{ return NULL; }
-static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_exit_queue(struct request_queue *q) { }
+static inline int blkcg_init_disk(struct gendisk *disk) { return 0; }
+static inline void blkcg_exit_disk(struct gendisk *disk) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,

View File

@ -37,7 +37,6 @@
#include <linux/t10-pi.h>
#include <linux/debugfs.h>
#include <linux/bpf.h>
-#include <linux/psi.h>
#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
@ -295,7 +294,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
while (!blk_try_enter_queue(q, pm)) {
if (flags & BLK_MQ_REQ_NOWAIT)
-return -EBUSY;
+return -EAGAIN;
/*
* read pair of barrier in blk_freeze_queue_start(), we need to
@ -325,7 +324,7 @@ int __bio_queue_enter(struct request_queue *q, struct bio *bio)
if (test_bit(GD_DEAD, &disk->state))
goto dead;
bio_wouldblock_error(bio);
-return -EBUSY;
+return -EAGAIN;
}
/*
@ -426,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
goto fail_stats;
-blk_queue_dma_alignment(q, 511);
blk_set_default_limits(&q->limits);
q->nr_requests = BLKDEV_DEFAULT_RQ;
@ -487,18 +485,15 @@ static int __init fail_make_request_debugfs(void)
late_initcall(fail_make_request_debugfs);
#endif /* CONFIG_FAIL_MAKE_REQUEST */
-static inline bool bio_check_ro(struct bio *bio)
+static inline void bio_check_ro(struct bio *bio)
{
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
-return false;
+return;
pr_warn("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
/* Older lvm-tools actually trigger this */
-return false;
}
-return false;
}
static noinline int should_fail_bio(struct bio *bio) static noinline int should_fail_bio(struct bio *bio)
@ -717,13 +712,12 @@ void submit_bio_noacct(struct bio *bio)
* For a REQ_NOWAIT based request, return -EOPNOTSUPP
* if queue does not support NOWAIT.
*/
-if ((bio->bi_opf & REQ_NOWAIT) && !blk_queue_nowait(q))
+if ((bio->bi_opf & REQ_NOWAIT) && !bdev_nowait(bdev))
goto not_supported;
if (should_fail_bio(bio))
goto end_io;
-if (unlikely(bio_check_ro(bio)))
-goto end_io;
+bio_check_ro(bio);
if (!bio_flagged(bio, BIO_REMAPPED)) {
if (unlikely(bio_check_eod(bio)))
goto end_io;
@ -814,7 +808,7 @@ EXPORT_SYMBOL(submit_bio_noacct);
*
* The success/failure status of the request, along with notification of
* completion, is delivered asynchronously through the ->bi_end_io() callback
-* in @bio. The bio must NOT be touched by thecaller until ->bi_end_io() has
+* in @bio. The bio must NOT be touched by the caller until ->bi_end_io() has
* been called.
*/
void submit_bio(struct bio *bio)
@ -829,22 +823,6 @@ void submit_bio(struct bio *bio)
count_vm_events(PGPGOUT, bio_sectors(bio)); count_vm_events(PGPGOUT, bio_sectors(bio));
} }
/*
* If we're reading data that is part of the userspace workingset, count
* submission time as memory stall. When the device is congested, or
* the submitting cgroup IO-throttled, submission can be a significant
* part of overall IO time.
*/
if (unlikely(bio_op(bio) == REQ_OP_READ &&
bio_flagged(bio, BIO_WORKINGSET))) {
unsigned long pflags;
psi_memstall_enter(&pflags);
submit_bio_noacct(bio);
psi_memstall_leave(&pflags);
return;
}
submit_bio_noacct(bio); submit_bio_noacct(bio);
} }
EXPORT_SYMBOL(submit_bio); EXPORT_SYMBOL(submit_bio);
@ -871,6 +849,12 @@ int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags)
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return 0; return 0;
/*
* As the requests that require a zone lock are not plugged in the
* first place, directly accessing the plug instead of using
* blk_mq_plug() should not have any consequences during flushing for
* zoned devices.
*/
blk_flush_plug(current->plug, false); blk_flush_plug(current->plug, false);
if (bio_queue_enter(bio)) if (bio_queue_enter(bio))

View File

@ -539,7 +539,7 @@ static int blk_crypto_fallback_init(void)
if (blk_crypto_fallback_inited)
return 0;
-prandom_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
+get_random_bytes(blank_key, BLK_CRYPTO_MAX_KEY_SIZE);
err = bioset_init(&crypto_bio_split, 64, 0, 0);
if (err)

View File

@ -205,7 +205,6 @@ static void blk_flush_complete_seq(struct request *rq,
* flush data request completion path. Restore @rq for
* normal completion and end it.
*/
-BUG_ON(!list_empty(&rq->queuelist));
list_del_init(&rq->flush.list);
blk_flush_restore_request(rq);
blk_mq_end_request(rq, error);
@ -218,7 +217,8 @@ static void blk_flush_complete_seq(struct request *rq,
blk_kick_flush(q, fq, cmd_flags);
}
-static void flush_end_io(struct request *flush_rq, blk_status_t error)
+static enum rq_end_io_ret flush_end_io(struct request *flush_rq,
+blk_status_t error)
{
struct request_queue *q = flush_rq->q;
struct list_head *running;
@ -232,7 +232,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
if (!req_ref_put_and_test(flush_rq)) {
fq->rq_status = error;
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
-return;
+return RQ_END_IO_NONE;
}
blk_account_io_flush(flush_rq);
@ -269,6 +269,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
}
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+return RQ_END_IO_NONE;
}
bool is_flush_rq(struct request *rq)
@ -354,7 +355,8 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
blk_flush_queue_rq(flush_rq, false);
}
-static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
+static enum rq_end_io_ret mq_flush_data_end_io(struct request *rq,
+blk_status_t error)
{
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
@ -376,6 +378,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
blk_mq_sched_restart(hctx);
+return RQ_END_IO_NONE;
}
/**

View File

@ -664,17 +664,13 @@ static struct ioc *q_to_ioc(struct request_queue *q)
return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
}
-static const char *q_name(struct request_queue *q)
-{
-if (blk_queue_registered(q))
-return kobject_name(q->kobj.parent);
-else
-return "<unknown>";
-}
static const char __maybe_unused *ioc_name(struct ioc *ioc)
{
-return q_name(ioc->rqos.q);
+struct gendisk *disk = ioc->rqos.q->disk;
+if (!disk)
+return "<unknown>";
+return disk->disk_name;
}
static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
@ -1430,7 +1426,7 @@ static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
int flags, void *key)
{
struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
-struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
+struct iocg_wake_ctx *ctx = key;
u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
ctx->vbudget -= cost;
@ -2640,7 +2636,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
if (use_debt) {
iocg_incur_debt(iocg, abs_cost, &now);
if (iocg_kick_delay(iocg, &now))
-blkcg_schedule_throttle(rqos->q,
+blkcg_schedule_throttle(rqos->q->disk,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
iocg_unlock(iocg, ioc_locked, &flags);
return;
@ -2741,7 +2737,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
if (likely(!list_empty(&iocg->active_list))) {
iocg_incur_debt(iocg, abs_cost, &now);
if (iocg_kick_delay(iocg, &now))
-blkcg_schedule_throttle(rqos->q,
+blkcg_schedule_throttle(rqos->q->disk,
(bio->bi_opf & REQ_SWAP) == REQ_SWAP);
} else {
iocg_commit_bio(iocg, bio, abs_cost, cost);
@ -2832,8 +2828,9 @@ static struct rq_qos_ops ioc_rqos_ops = {
.exit = ioc_rqos_exit,
};
-static int blk_iocost_init(struct request_queue *q)
+static int blk_iocost_init(struct gendisk *disk)
{
+struct request_queue *q = disk->queue;
struct ioc *ioc;
struct rq_qos *rqos;
int i, cpu, ret;
@ -3170,6 +3167,7 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
size_t nbytes, loff_t off)
{
struct block_device *bdev;
+struct gendisk *disk;
struct ioc *ioc;
u32 qos[NR_QOS_PARAMS];
bool enable, user;
@ -3180,12 +3178,13 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
if (IS_ERR(bdev))
return PTR_ERR(bdev);
-ioc = q_to_ioc(bdev_get_queue(bdev));
+disk = bdev->bd_disk;
+ioc = q_to_ioc(disk->queue);
if (!ioc) {
-ret = blk_iocost_init(bdev_get_queue(bdev));
+ret = blk_iocost_init(disk);
if (ret)
goto err;
-ioc = q_to_ioc(bdev_get_queue(bdev));
+ioc = q_to_ioc(disk->queue);
}
spin_lock_irq(&ioc->lock);
@ -3262,11 +3261,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
spin_lock_irq(&ioc->lock);
if (enable) {
-blk_stat_enable_accounting(ioc->rqos.q);
-blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
+blk_stat_enable_accounting(disk->queue);
+blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = true;
} else {
-blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
+blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
ioc->enabled = false;
}
@ -3349,7 +3348,7 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
ioc = q_to_ioc(bdev_get_queue(bdev));
if (!ioc) {
-ret = blk_iocost_init(bdev_get_queue(bdev));
+ret = blk_iocost_init(bdev->bd_disk);
if (ret)
goto err;
ioc = q_to_ioc(bdev_get_queue(bdev));

View File

@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
if (use_delay)
-blkcg_schedule_throttle(rqos->q, use_memdelay);
+blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
/*
* To avoid priority inversions we want to just take a slot if we are
@ -756,8 +756,9 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
}
}
-int blk_iolatency_init(struct request_queue *q)
+int blk_iolatency_init(struct gendisk *disk)
{
+struct request_queue *q = disk->queue;
struct blk_iolatency *blkiolat;
struct rq_qos *rqos;
int ret;

View File

@ -202,14 +202,14 @@ void blkcg_set_ioprio(struct bio *bio)
bio->bi_ioprio = prio;
}
-void blk_ioprio_exit(struct request_queue *q)
+void blk_ioprio_exit(struct gendisk *disk)
{
-blkcg_deactivate_policy(q, &ioprio_policy);
+blkcg_deactivate_policy(disk->queue, &ioprio_policy);
}
-int blk_ioprio_init(struct request_queue *q)
+int blk_ioprio_init(struct gendisk *disk)
{
-return blkcg_activate_policy(q, &ioprio_policy);
+return blkcg_activate_policy(disk->queue, &ioprio_policy);
}
static int __init ioprio_init(void)

View File

@ -9,15 +9,15 @@ struct request_queue;
struct bio;
#ifdef CONFIG_BLK_CGROUP_IOPRIO
-int blk_ioprio_init(struct request_queue *q);
-void blk_ioprio_exit(struct request_queue *q);
+int blk_ioprio_init(struct gendisk *disk);
+void blk_ioprio_exit(struct gendisk *disk);
void blkcg_set_ioprio(struct bio *bio);
#else
-static inline int blk_ioprio_init(struct request_queue *q)
+static inline int blk_ioprio_init(struct gendisk *disk)
{
return 0;
}
-static inline void blk_ioprio_exit(struct request_queue *q)
+static inline void blk_ioprio_exit(struct gendisk *disk)
{
}
static inline void blkcg_set_ioprio(struct bio *bio)

View File

@ -309,6 +309,11 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
struct blk_plug plug;
int ret = 0;
+/* make sure that "len << SECTOR_SHIFT" doesn't overflow */
+if (max_sectors > UINT_MAX >> SECTOR_SHIFT)
+max_sectors = UINT_MAX >> SECTOR_SHIFT;
+max_sectors &= ~bs_mask;
if (max_sectors == 0)
return -EOPNOTSUPP;
if ((sector | nr_sects) & bs_mask)
@ -322,10 +327,10 @@ int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
bio = blk_next_bio(bio, bdev, 0, REQ_OP_SECURE_ERASE, gfp);
bio->bi_iter.bi_sector = sector;
-bio->bi_iter.bi_size = len;
-sector += len << SECTOR_SHIFT;
-nr_sects -= len << SECTOR_SHIFT;
+bio->bi_iter.bi_size = len << SECTOR_SHIFT;
+sector += len;
+nr_sects -= len;
if (!nr_sects) {
ret = submit_bio_wait(bio);
bio_put(bio);
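(Illustrative check of the clamp above, assuming 512-byte sectors: bi_size is a 32-bit byte count, so the largest whole bio this loop may build is UINT_MAX >> SECTOR_SHIFT = 8388607 sectors, just under 4 GiB. Capping max_sectors at that bound keeps "len << SECTOR_SHIFT" from wrapping, and the corrected lines keep the units straight: bi_size in bytes, sector and nr_sects in sectors.)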

View File

@ -158,7 +158,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
if (map_data) {
-nr_pages = 1 << map_data->page_order;
+nr_pages = 1U << map_data->page_order;
i = map_data->offset / PAGE_SIZE;
}
while (len) {
@ -231,6 +231,37 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
return ret; return ret;
} }
static void blk_mq_map_bio_put(struct bio *bio)
{
if (bio->bi_opf & REQ_ALLOC_CACHE) {
bio_put(bio);
} else {
bio_uninit(bio);
kfree(bio);
}
}
static struct bio *blk_rq_map_bio_alloc(struct request *rq,
unsigned int nr_vecs, gfp_t gfp_mask)
{
struct bio *bio;
if (rq->cmd_flags & REQ_POLLED) {
blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;
bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
&fs_bio_set);
if (!bio)
return NULL;
} else {
bio = bio_kmalloc(nr_vecs, gfp_mask);
if (!bio)
return NULL;
bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
}
return bio;
}
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
gfp_t gfp_mask) gfp_t gfp_mask)
{ {
@ -243,18 +274,24 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
if (!iov_iter_count(iter))
return -EINVAL;
-bio = bio_kmalloc(nr_vecs, gfp_mask);
-if (!bio)
+bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
+if (bio == NULL)
return -ENOMEM;
-bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
while (iov_iter_count(iter)) {
-struct page **pages;
+struct page **pages, *stack_pages[UIO_FASTIOV];
ssize_t bytes;
-size_t offs, added = 0;
+size_t offs;
int npages;
-bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
+if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
+pages = stack_pages;
+bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
+nr_vecs, &offs);
+} else {
+bytes = iov_iter_get_pages_alloc2(iter, &pages,
+LONG_MAX, &offs);
+}
if (unlikely(bytes <= 0)) {
ret = bytes ? bytes : -EFAULT;
goto out_unmap;
@ -280,7 +317,6 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
break;
}
-added += n;
bytes -= n;
offs = 0;
}
@ -290,7 +326,8 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
*/
while (j < npages)
put_page(pages[j++]);
-kvfree(pages);
+if (pages != stack_pages)
+kvfree(pages);
/* couldn't stuff something into bio? */
if (bytes) {
iov_iter_revert(iter, bytes);
@ -305,8 +342,7 @@ static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
out_unmap:
bio_release_pages(bio, false);
-bio_uninit(bio);
-kfree(bio);
+blk_mq_map_bio_put(bio);
return ret;
}
@ -512,6 +548,62 @@ int blk_rq_append_bio(struct request *rq, struct bio *bio)
} }
EXPORT_SYMBOL(blk_rq_append_bio); EXPORT_SYMBOL(blk_rq_append_bio);
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
struct request_queue *q = rq->q;
size_t nr_iter = iov_iter_count(iter);
size_t nr_segs = iter->nr_segs;
struct bio_vec *bvecs, *bvprvp = NULL;
struct queue_limits *lim = &q->limits;
unsigned int nsegs = 0, bytes = 0;
struct bio *bio;
size_t i;
if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
return -EINVAL;
if (nr_segs > queue_max_segments(q))
return -EINVAL;
/* no iovecs to alloc, as we already have a BVEC iterator */
bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
if (bio == NULL)
return -ENOMEM;
bio_iov_bvec_set(bio, (struct iov_iter *)iter);
blk_rq_bio_prep(rq, bio, nr_segs);
/* loop to perform a bunch of sanity checks */
bvecs = (struct bio_vec *)iter->bvec;
for (i = 0; i < nr_segs; i++) {
struct bio_vec *bv = &bvecs[i];
/*
* If the queue doesn't support SG gaps and adding this
* offset would create a gap, fallback to copy.
*/
if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
blk_mq_map_bio_put(bio);
return -EREMOTEIO;
}
/* check full condition */
if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
goto put_bio;
if (bytes + bv->bv_len > nr_iter)
goto put_bio;
if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
goto put_bio;
nsegs++;
bytes += bv->bv_len;
bvprvp = bv;
}
return 0;
put_bio:
blk_mq_map_bio_put(bio);
return -EINVAL;
}
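A minimal sketch of a caller for this bvec path (illustrative only; foo_submit_bvecs and the write direction are assumptions, not part of this commit). blk_rq_map_user_iov() picks the zero-copy bvec mapping automatically and, when blk_rq_map_user_bvec() reports -EREMOTEIO for a gap or limit mismatch, quietly falls back to the copy path:

static int foo_submit_bvecs(struct request *rq, struct bio_vec *bvecs,
			    unsigned int nr_segs, size_t len)
{
	struct iov_iter iter;

	/* device write: the bvec array is a data source */
	iov_iter_bvec(&iter, WRITE, bvecs, nr_segs, len);
	return blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
}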
/** /**
* blk_rq_map_user_iov - map user data to a request, for passthrough requests * blk_rq_map_user_iov - map user data to a request, for passthrough requests
* @q: request queue where request should be inserted * @q: request queue where request should be inserted
@ -531,24 +623,35 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask) const struct iov_iter *iter, gfp_t gfp_mask)
{ {
bool copy = false; bool copy = false, map_bvec = false;
unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
struct bio *bio = NULL; struct bio *bio = NULL;
struct iov_iter i; struct iov_iter i;
int ret = -EINVAL; int ret = -EINVAL;
if (!iter_is_iovec(iter))
goto fail;
if (map_data) if (map_data)
copy = true; copy = true;
else if (blk_queue_may_bounce(q)) else if (blk_queue_may_bounce(q))
copy = true; copy = true;
else if (iov_iter_alignment(iter) & align) else if (iov_iter_alignment(iter) & align)
copy = true; copy = true;
else if (iov_iter_is_bvec(iter))
map_bvec = true;
else if (!iter_is_iovec(iter))
copy = true;
else if (queue_virt_boundary(q)) else if (queue_virt_boundary(q))
copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
if (map_bvec) {
ret = blk_rq_map_user_bvec(rq, iter);
if (!ret)
return 0;
if (ret != -EREMOTEIO)
goto fail;
/* fall back to copying the data on limits mismatches */
copy = true;
}
i = *iter; i = *iter;
do { do {
if (copy) if (copy)
@ -586,6 +689,42 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
} }
EXPORT_SYMBOL(blk_rq_map_user); EXPORT_SYMBOL(blk_rq_map_user);
int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
bool vec, int iov_count, bool check_iter_count, int rw)
{
int ret = 0;
if (vec) {
struct iovec fast_iov[UIO_FASTIOV];
struct iovec *iov = fast_iov;
struct iov_iter iter;
ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
UIO_FASTIOV, &iov, &iter);
if (ret < 0)
return ret;
if (iov_count) {
/* SG_IO howto says that the shorter of the two wins */
iov_iter_truncate(&iter, buf_len);
if (check_iter_count && !iov_iter_count(&iter)) {
kfree(iov);
return -EINVAL;
}
}
ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
gfp_mask);
kfree(iov);
} else if (buf_len) {
ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
gfp_mask);
}
return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);
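A hedged sketch of an SG_IO-style user of the new helper (foo_map_sgio and the write direction are illustrative): one call now covers both the flat-buffer and the iovec case, including the "shorter of the two lengths wins" truncation handled above.

static int foo_map_sgio(struct request *rq, void __user *ubuf,
			unsigned long buf_len, bool is_vec, int iov_count)
{
	return blk_rq_map_user_io(rq, NULL, ubuf, buf_len, GFP_KERNEL,
				  is_vec, iov_count, true, WRITE);
}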
/** /**
* blk_rq_unmap_user - unmap a request with user data * blk_rq_unmap_user - unmap a request with user data
* @bio: start of bio list * @bio: start of bio list
@ -611,8 +750,7 @@ int blk_rq_unmap_user(struct bio *bio)
next_bio = bio; next_bio = bio;
bio = bio->bi_next; bio = bio->bi_next;
bio_uninit(next_bio); blk_mq_map_bio_put(next_bio);
kfree(next_bio);
} }
return ret; return ret;


@ -32,7 +32,7 @@ static int get_first_sibling(unsigned int cpu)
return cpu; return cpu;
} }
int blk_mq_map_queues(struct blk_mq_queue_map *qmap) void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
{ {
unsigned int *map = qmap->mq_map; unsigned int *map = qmap->mq_map;
unsigned int nr_queues = qmap->nr_queues; unsigned int nr_queues = qmap->nr_queues;
@ -70,8 +70,6 @@ int blk_mq_map_queues(struct blk_mq_queue_map *qmap)
map[cpu] = map[first_sibling]; map[cpu] = map[first_sibling];
} }
} }
return 0;
} }
EXPORT_SYMBOL_GPL(blk_mq_map_queues); EXPORT_SYMBOL_GPL(blk_mq_map_queues);
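With blk_mq_map_queues() now returning void, a driver's .map_queues callback loses its return value as well. A minimal sketch under assumed driver names, simply taking the default CPU spread for the only map:

static void foo_map_queues(struct blk_mq_tag_set *set)
{
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}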


@ -807,8 +807,6 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
return "latency"; return "latency";
case RQ_QOS_COST: case RQ_QOS_COST:
return "cost"; return "cost";
case RQ_QOS_IOPRIO:
return "ioprio";
} }
return "unknown"; return "unknown";
} }


@ -23,8 +23,8 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding * that maps a queue to the CPUs that have irq affinity for the corresponding
* vector. * vector.
*/ */
int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev, void blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
int offset) int offset)
{ {
const struct cpumask *mask; const struct cpumask *mask;
unsigned int queue, cpu; unsigned int queue, cpu;
@ -38,11 +38,10 @@ int blk_mq_pci_map_queues(struct blk_mq_queue_map *qmap, struct pci_dev *pdev,
qmap->mq_map[cpu] = qmap->queue_offset + queue; qmap->mq_map[cpu] = qmap->queue_offset + queue;
} }
return 0; return;
fallback: fallback:
WARN_ON_ONCE(qmap->nr_queues > 1); WARN_ON_ONCE(qmap->nr_queues > 1);
blk_mq_clear_mq_map(qmap); blk_mq_clear_mq_map(qmap);
return 0;
} }
EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues); EXPORT_SYMBOL_GPL(blk_mq_pci_map_queues);
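The PCI wrapper follows the same void-returning pattern; a sketch of an MSI-X-affine driver callback (foo_dev, its pdev member and the zero vector offset are assumptions), with the virtio and RDMA wrappers below working the same way:

static void foo_pci_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *fdev = set->driver_data;

	blk_mq_pci_map_queues(&set->map[HCTX_TYPE_DEFAULT], fdev->pdev, 0);
}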


@ -21,7 +21,7 @@
* @set->nr_hw_queues, or @dev does not provide an affinity mask for a * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
* vector, we fallback to the naive mapping. * vector, we fallback to the naive mapping.
*/ */
int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map, void blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
struct ib_device *dev, int first_vec) struct ib_device *dev, int first_vec)
{ {
const struct cpumask *mask; const struct cpumask *mask;
@ -36,9 +36,9 @@ int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
map->mq_map[cpu] = map->queue_offset + queue; map->mq_map[cpu] = map->queue_offset + queue;
} }
return 0; return;
fallback: fallback:
return blk_mq_map_queues(map); blk_mq_map_queues(map);
} }
EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues); EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);

View File

@ -196,7 +196,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
* other allocations on previous queue won't be starved. * other allocations on previous queue won't be starved.
*/ */
if (bt != bt_prev) if (bt != bt_prev)
sbitmap_queue_wake_up(bt_prev); sbitmap_queue_wake_up(bt_prev, 1);
ws = bt_wait_ptr(bt, data->hctx); ws = bt_wait_ptr(bt, data->hctx);
} while (1); } while (1);


@ -21,7 +21,7 @@
* that maps a queue to the CPUs that have irq affinity for the corresponding * that maps a queue to the CPUs that have irq affinity for the corresponding
* vector. * vector.
*/ */
int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap, void blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
struct virtio_device *vdev, int first_vec) struct virtio_device *vdev, int first_vec)
{ {
const struct cpumask *mask; const struct cpumask *mask;
@ -39,8 +39,9 @@ int blk_mq_virtio_map_queues(struct blk_mq_queue_map *qmap,
qmap->mq_map[cpu] = qmap->queue_offset + queue; qmap->mq_map[cpu] = qmap->queue_offset + queue;
} }
return 0; return;
fallback: fallback:
return blk_mq_map_queues(qmap); blk_mq_map_queues(qmap);
} }
EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues); EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);


@ -510,25 +510,87 @@ static struct request *__blk_mq_alloc_requests(struct blk_mq_alloc_data *data)
alloc_time_ns); alloc_time_ns);
} }
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf, static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
blk_mq_req_flags_t flags) struct blk_plug *plug,
blk_opf_t opf,
blk_mq_req_flags_t flags)
{ {
struct blk_mq_alloc_data data = { struct blk_mq_alloc_data data = {
.q = q, .q = q,
.flags = flags, .flags = flags,
.cmd_flags = opf, .cmd_flags = opf,
.nr_tags = 1, .nr_tags = plug->nr_ios,
.cached_rq = &plug->cached_rq,
}; };
struct request *rq; struct request *rq;
int ret;
ret = blk_queue_enter(q, flags); if (blk_queue_enter(q, flags))
if (ret) return NULL;
return ERR_PTR(ret);
plug->nr_ios = 1;
rq = __blk_mq_alloc_requests(&data); rq = __blk_mq_alloc_requests(&data);
if (!rq) if (unlikely(!rq))
goto out_queue_exit; blk_queue_exit(q);
return rq;
}
static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
blk_opf_t opf,
blk_mq_req_flags_t flags)
{
struct blk_plug *plug = current->plug;
struct request *rq;
if (!plug)
return NULL;
if (rq_list_empty(plug->cached_rq)) {
if (plug->nr_ios == 1)
return NULL;
rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
if (rq)
goto got_it;
return NULL;
}
rq = rq_list_peek(&plug->cached_rq);
if (!rq || rq->q != q)
return NULL;
if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
return NULL;
plug->cached_rq = rq_list_next(rq);
got_it:
rq->cmd_flags = opf;
INIT_LIST_HEAD(&rq->queuelist);
return rq;
}
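A sketch of how a submitter could exploit the new plug-backed cache (illustrative only): blk_start_plug_nr_ios() sizes plug->nr_ios so that blk_mq_rq_cache_fill() batches the tag allocation and later blk_mq_alloc_request() calls on the same queue are served from plug->cached_rq.

	struct blk_plug plug;
	struct request *rq;
	int i;

	blk_start_plug_nr_ios(&plug, 16);
	for (i = 0; i < 16; i++) {
		rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
		if (IS_ERR(rq))
			break;
		/* ... set up rq and issue it, e.g. blk_execute_rq_nowait(rq, false) ... */
	}
	blk_finish_plug(&plug);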
struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
blk_mq_req_flags_t flags)
{
struct request *rq;
rq = blk_mq_alloc_cached_request(q, opf, flags);
if (!rq) {
struct blk_mq_alloc_data data = {
.q = q,
.flags = flags,
.cmd_flags = opf,
.nr_tags = 1,
};
int ret;
ret = blk_queue_enter(q, flags);
if (ret)
return ERR_PTR(ret);
rq = __blk_mq_alloc_requests(&data);
if (!rq)
goto out_queue_exit;
}
rq->__data_len = 0; rq->__data_len = 0;
rq->__sector = (sector_t) -1; rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL; rq->bio = rq->biotail = NULL;
@ -549,6 +611,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
.nr_tags = 1, .nr_tags = 1,
}; };
u64 alloc_time_ns = 0; u64 alloc_time_ns = 0;
struct request *rq;
unsigned int cpu; unsigned int cpu;
unsigned int tag; unsigned int tag;
int ret; int ret;
@ -598,8 +661,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
tag = blk_mq_get_tag(&data); tag = blk_mq_get_tag(&data);
if (tag == BLK_MQ_NO_TAG) if (tag == BLK_MQ_NO_TAG)
goto out_queue_exit; goto out_queue_exit;
return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag, rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
alloc_time_ns); alloc_time_ns);
rq->__data_len = 0;
rq->__sector = (sector_t) -1;
rq->bio = rq->biotail = NULL;
return rq;
out_queue_exit: out_queue_exit:
blk_queue_exit(q); blk_queue_exit(q);
@ -761,8 +828,10 @@ static void blk_complete_request(struct request *req)
* can find how many bytes remain in the request * can find how many bytes remain in the request
* later. * later.
*/ */
req->bio = NULL; if (!req->end_io) {
req->__data_len = 0; req->bio = NULL;
req->__data_len = 0;
}
} }
/** /**
@ -939,7 +1008,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
if (rq->end_io) { if (rq->end_io) {
rq_qos_done(rq->q, rq); rq_qos_done(rq->q, rq);
rq->end_io(rq, error); if (rq->end_io(rq, error) == RQ_END_IO_FREE)
blk_mq_free_request(rq);
} else { } else {
blk_mq_free_request(rq); blk_mq_free_request(rq);
} }
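rq->end_io handlers now return an enum rq_end_io_ret: RQ_END_IO_FREE keeps the old behaviour of having the block layer free the request, while RQ_END_IO_NONE (as blk_end_sync_rq() does further down) retains ownership with the handler's user. A hedged sketch of a handler opting for the former, with assumed driver names:

static enum rq_end_io_ret foo_end_io(struct request *rq, blk_status_t error)
{
	struct foo_ctx *ctx = rq->end_io_data;

	ctx->status = error;
	schedule_work(&ctx->work);	/* finish up outside the completion path */
	return RQ_END_IO_FREE;		/* __blk_mq_end_request() frees rq for us */
}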
@ -992,6 +1062,13 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
rq_qos_done(rq->q, rq); rq_qos_done(rq->q, rq);
/*
* If end_io handler returns NONE, then it still has
* ownership of the request.
*/
if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
continue;
WRITE_ONCE(rq->state, MQ_RQ_IDLE); WRITE_ONCE(rq->state, MQ_RQ_IDLE);
if (!req_ref_put_and_test(rq)) if (!req_ref_put_and_test(rq))
continue; continue;
@ -1093,10 +1170,12 @@ bool blk_mq_complete_request_remote(struct request *rq)
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
/* /*
* For a polled request, always complete locally, it's pointless * For request which hctx has only one ctx mapping,
* to redirect the completion. * or a polled request, always complete locally,
* it's pointless to redirect the completion.
*/ */
if (rq->cmd_flags & REQ_POLLED) if (rq->mq_hctx->nr_ctx == 1 ||
rq->cmd_flags & REQ_POLLED)
return false; return false;
if (blk_mq_complete_need_ipi(rq)) { if (blk_mq_complete_need_ipi(rq)) {
@ -1183,6 +1262,7 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
(!blk_queue_nomerges(rq->q) && (!blk_queue_nomerges(rq->q) &&
blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) { blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
blk_mq_flush_plug_list(plug, false); blk_mq_flush_plug_list(plug, false);
last = NULL;
trace_block_plug(rq->q); trace_block_plug(rq->q);
} }
@ -1213,6 +1293,12 @@ void blk_execute_rq_nowait(struct request *rq, bool at_head)
WARN_ON(!blk_rq_is_passthrough(rq)); WARN_ON(!blk_rq_is_passthrough(rq));
blk_account_io_start(rq); blk_account_io_start(rq);
/*
* As plugging can be enabled for passthrough requests on a zoned
* device, directly accessing the plug instead of using blk_mq_plug()
* should not have any consequences.
*/
if (current->plug) if (current->plug)
blk_add_rq_to_plug(current->plug, rq); blk_add_rq_to_plug(current->plug, rq);
else else
@ -1225,15 +1311,16 @@ struct blk_rq_wait {
blk_status_t ret; blk_status_t ret;
}; };
static void blk_end_sync_rq(struct request *rq, blk_status_t ret) static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
{ {
struct blk_rq_wait *wait = rq->end_io_data; struct blk_rq_wait *wait = rq->end_io_data;
wait->ret = ret; wait->ret = ret;
complete(&wait->done); complete(&wait->done);
return RQ_END_IO_NONE;
} }
static bool blk_rq_is_poll(struct request *rq) bool blk_rq_is_poll(struct request *rq)
{ {
if (!rq->mq_hctx) if (!rq->mq_hctx)
return false; return false;
@ -1243,6 +1330,7 @@ static bool blk_rq_is_poll(struct request *rq)
return false; return false;
return true; return true;
} }
EXPORT_SYMBOL_GPL(blk_rq_is_poll);
static void blk_rq_poll_completion(struct request *rq, struct completion *wait) static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
{ {
@ -1463,10 +1551,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
void blk_mq_put_rq_ref(struct request *rq) void blk_mq_put_rq_ref(struct request *rq)
{ {
if (is_flush_rq(rq)) if (is_flush_rq(rq)) {
rq->end_io(rq, 0); if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
else if (req_ref_put_and_test(rq)) blk_mq_free_request(rq);
} else if (req_ref_put_and_test(rq)) {
__blk_mq_free_request(rq); __blk_mq_free_request(rq);
}
} }
static bool blk_mq_check_expired(struct request *rq, void *priv) static bool blk_mq_check_expired(struct request *rq, void *priv)
@ -1992,7 +2082,7 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list,
if (!needs_restart || if (!needs_restart ||
(no_tag && list_empty_careful(&hctx->dispatch_wait.entry))) (no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
blk_mq_run_hw_queue(hctx, true); blk_mq_run_hw_queue(hctx, true);
else if (needs_restart && needs_resource) else if (needs_resource)
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY); blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
blk_mq_update_dispatch_busy(hctx, true); blk_mq_update_dispatch_busy(hctx, true);
@ -3028,8 +3118,11 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
struct page *page; struct page *page;
unsigned long flags; unsigned long flags;
/* There is no need to clear a driver tags own mapping */ /*
if (drv_tags == tags) * There is no need to clear mapping if driver tags is not initialized
* or the mapping belongs to the driver tags.
*/
if (!drv_tags || drv_tags == tags)
return; return;
list_for_each_entry(page, &tags->page_list, lru) { list_for_each_entry(page, &tags->page_list, lru) {
@ -3952,9 +4045,14 @@ EXPORT_SYMBOL(__blk_mq_alloc_disk);
struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q, struct gendisk *blk_mq_alloc_disk_for_queue(struct request_queue *q,
struct lock_class_key *lkclass) struct lock_class_key *lkclass)
{ {
struct gendisk *disk;
if (!blk_get_queue(q)) if (!blk_get_queue(q))
return NULL; return NULL;
return __alloc_disk_node(q, NUMA_NO_NODE, lkclass); disk = __alloc_disk_node(q, NUMA_NO_NODE, lkclass);
if (!disk)
blk_put_queue(q);
return disk;
} }
EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue); EXPORT_SYMBOL(blk_mq_alloc_disk_for_queue);
@ -4101,9 +4199,7 @@ int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
return 0; return 0;
err_hctxs: err_hctxs:
xa_destroy(&q->hctx_table); blk_mq_release(q);
q->nr_hw_queues = 0;
blk_mq_sysfs_deinit(q);
err_poll: err_poll:
blk_stat_free_callback(q->poll_cb); blk_stat_free_callback(q->poll_cb);
q->poll_cb = NULL; q->poll_cb = NULL;
@ -4191,7 +4287,7 @@ static int blk_mq_alloc_set_map_and_rqs(struct blk_mq_tag_set *set)
return 0; return 0;
} }
static int blk_mq_update_queue_map(struct blk_mq_tag_set *set) static void blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{ {
/* /*
* blk_mq_map_queues() and multiple .map_queues() implementations * blk_mq_map_queues() and multiple .map_queues() implementations
@ -4221,10 +4317,10 @@ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
for (i = 0; i < set->nr_maps; i++) for (i = 0; i < set->nr_maps; i++)
blk_mq_clear_mq_map(&set->map[i]); blk_mq_clear_mq_map(&set->map[i]);
return set->ops->map_queues(set); set->ops->map_queues(set);
} else { } else {
BUG_ON(set->nr_maps > 1); BUG_ON(set->nr_maps > 1);
return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]); blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
} }
} }
@ -4323,9 +4419,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues; set->map[i].nr_queues = is_kdump_kernel() ? 1 : set->nr_hw_queues;
} }
ret = blk_mq_update_queue_map(set); blk_mq_update_queue_map(set);
if (ret)
goto out_free_mq_map;
ret = blk_mq_alloc_set_map_and_rqs(set); ret = blk_mq_alloc_set_map_and_rqs(set);
if (ret) if (ret)
@ -4473,14 +4567,14 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
list_add(&qe->node, head); list_add(&qe->node, head);
/* /*
* After elevator_switch_mq, the previous elevator_queue will be * After elevator_switch, the previous elevator_queue will be
* released by elevator_release. The reference of the io scheduler * released by elevator_release. The reference of the io scheduler
* module get by elevator_get will also be put. So we need to get * module get by elevator_get will also be put. So we need to get
* a reference of the io scheduler module here to prevent it to be * a reference of the io scheduler module here to prevent it to be
* removed. * removed.
*/ */
__module_get(qe->type->elevator_owner); __module_get(qe->type->elevator_owner);
elevator_switch_mq(q, NULL); elevator_switch(q, NULL);
mutex_unlock(&q->sysfs_lock); mutex_unlock(&q->sysfs_lock);
return true; return true;
@ -4512,7 +4606,7 @@ static void blk_mq_elv_switch_back(struct list_head *head,
kfree(qe); kfree(qe);
mutex_lock(&q->sysfs_lock); mutex_lock(&q->sysfs_lock);
elevator_switch_mq(q, t); elevator_switch(q, t);
mutex_unlock(&q->sysfs_lock); mutex_unlock(&q->sysfs_lock);
} }


@ -312,7 +312,8 @@ static inline void blk_mq_clear_mq_map(struct blk_mq_queue_map *qmap)
static inline struct blk_plug *blk_mq_plug( struct bio *bio) static inline struct blk_plug *blk_mq_plug( struct bio *bio)
{ {
/* Zoned block device write operation case: do not plug the BIO */ /* Zoned block device write operation case: do not plug the BIO */
if (bdev_is_zoned(bio->bi_bdev) && op_is_write(bio_op(bio))) if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
bdev_op_is_zoned_write(bio->bi_bdev, bio_op(bio)))
return NULL; return NULL;
/* /*


@ -17,7 +17,6 @@ enum rq_qos_id {
RQ_QOS_WBT, RQ_QOS_WBT,
RQ_QOS_LATENCY, RQ_QOS_LATENCY,
RQ_QOS_COST, RQ_QOS_COST,
RQ_QOS_IOPRIO,
}; };
struct rq_wait { struct rq_wait {


@ -57,8 +57,8 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->misaligned = 0; lim->misaligned = 0;
lim->zoned = BLK_ZONED_NONE; lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0; lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
} }
EXPORT_SYMBOL(blk_set_default_limits);
/** /**
* blk_set_stacking_limits - set default limits for stacking devices * blk_set_stacking_limits - set default limits for stacking devices
@ -600,6 +600,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->io_min = max(t->io_min, b->io_min); t->io_min = max(t->io_min, b->io_min);
t->io_opt = lcm_not_zero(t->io_opt, b->io_opt); t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
/* Set non-power-of-2 compatible chunk_sectors boundary */ /* Set non-power-of-2 compatible chunk_sectors boundary */
if (b->chunk_sectors) if (b->chunk_sectors)
@ -773,7 +774,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
**/ **/
void blk_queue_dma_alignment(struct request_queue *q, int mask) void blk_queue_dma_alignment(struct request_queue *q, int mask)
{ {
q->dma_alignment = mask; q->limits.dma_alignment = mask;
} }
EXPORT_SYMBOL(blk_queue_dma_alignment); EXPORT_SYMBOL(blk_queue_dma_alignment);
@ -795,8 +796,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{ {
BUG_ON(mask > PAGE_SIZE); BUG_ON(mask > PAGE_SIZE);
if (mask > q->dma_alignment) if (mask > q->limits.dma_alignment)
q->dma_alignment = mask; q->limits.dma_alignment = mask;
} }
EXPORT_SYMBOL(blk_queue_update_dma_alignment); EXPORT_SYMBOL(blk_queue_update_dma_alignment);


@ -844,7 +844,7 @@ int blk_register_queue(struct gendisk *disk)
blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q); blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
wbt_enable_default(q); wbt_enable_default(q);
blk_throtl_register_queue(q); blk_throtl_register(disk);
/* Now everything is ready and send out KOBJ_ADD uevent */ /* Now everything is ready and send out KOBJ_ADD uevent */
kobject_uevent(&q->kobj, KOBJ_ADD); kobject_uevent(&q->kobj, KOBJ_ADD);


@ -329,8 +329,8 @@ static struct bio *throtl_pop_queued(struct list_head *queued,
/* init a service_queue, assumes the caller zeroed it */ /* init a service_queue, assumes the caller zeroed it */
static void throtl_service_queue_init(struct throtl_service_queue *sq) static void throtl_service_queue_init(struct throtl_service_queue *sq)
{ {
INIT_LIST_HEAD(&sq->queued[0]); INIT_LIST_HEAD(&sq->queued[READ]);
INIT_LIST_HEAD(&sq->queued[1]); INIT_LIST_HEAD(&sq->queued[WRITE]);
sq->pending_tree = RB_ROOT_CACHED; sq->pending_tree = RB_ROOT_CACHED;
timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
} }
@ -420,24 +420,17 @@ static void tg_update_has_rules(struct throtl_grp *tg)
struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq); struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
struct throtl_data *td = tg->td; struct throtl_data *td = tg->td;
int rw; int rw;
int has_iops_limit = 0;
for (rw = READ; rw <= WRITE; rw++) { for (rw = READ; rw <= WRITE; rw++) {
unsigned int iops_limit = tg_iops_limit(tg, rw); tg->has_rules_iops[rw] =
(parent_tg && parent_tg->has_rules_iops[rw]) ||
tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
(td->limit_valid[td->limit_index] && (td->limit_valid[td->limit_index] &&
(tg_bps_limit(tg, rw) != U64_MAX || tg_iops_limit(tg, rw) != UINT_MAX);
iops_limit != UINT_MAX)); tg->has_rules_bps[rw] =
(parent_tg && parent_tg->has_rules_bps[rw]) ||
if (iops_limit != UINT_MAX) (td->limit_valid[td->limit_index] &&
has_iops_limit = 1; (tg_bps_limit(tg, rw) != U64_MAX));
} }
if (has_iops_limit)
tg->flags |= THROTL_TG_HAS_IOPS_LIMIT;
else
tg->flags &= ~THROTL_TG_HAS_IOPS_LIMIT;
} }
static void throtl_pd_online(struct blkg_policy_data *pd) static void throtl_pd_online(struct blkg_policy_data *pd)
@ -520,7 +513,6 @@ static void throtl_rb_erase(struct rb_node *n,
{ {
rb_erase_cached(n, &parent_sq->pending_tree); rb_erase_cached(n, &parent_sq->pending_tree);
RB_CLEAR_NODE(n); RB_CLEAR_NODE(n);
--parent_sq->nr_pending;
} }
static void update_min_dispatch_time(struct throtl_service_queue *parent_sq) static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
@ -572,7 +564,11 @@ static void throtl_enqueue_tg(struct throtl_grp *tg)
static void throtl_dequeue_tg(struct throtl_grp *tg) static void throtl_dequeue_tg(struct throtl_grp *tg)
{ {
if (tg->flags & THROTL_TG_PENDING) { if (tg->flags & THROTL_TG_PENDING) {
throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq); struct throtl_service_queue *parent_sq =
tg->service_queue.parent_sq;
throtl_rb_erase(&tg->rb_node, parent_sq);
--parent_sq->nr_pending;
tg->flags &= ~THROTL_TG_PENDING; tg->flags &= ~THROTL_TG_PENDING;
} }
} }
@ -639,6 +635,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
{ {
tg->bytes_disp[rw] = 0; tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
tg->carryover_bytes[rw] = 0;
tg->carryover_ios[rw] = 0;
/* /*
* Previous slice has expired. We must have trimmed it after last * Previous slice has expired. We must have trimmed it after last
@ -656,12 +654,17 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
tg->slice_end[rw], jiffies); tg->slice_end[rw], jiffies);
} }
static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw) static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw,
bool clear_carryover)
{ {
tg->bytes_disp[rw] = 0; tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0; tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies; tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice; tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
if (clear_carryover) {
tg->carryover_bytes[rw] = 0;
tg->carryover_ios[rw] = 0;
}
throtl_log(&tg->service_queue, throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu", "[%c] new slice start=%lu end=%lu jiffies=%lu",
@ -754,13 +757,76 @@ static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
tg->slice_start[rw], tg->slice_end[rw], jiffies); tg->slice_start[rw], tg->slice_end[rw], jiffies);
} }
static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio, static unsigned int calculate_io_allowed(u32 iops_limit,
u32 iops_limit, unsigned long *wait) unsigned long jiffy_elapsed)
{
unsigned int io_allowed;
u64 tmp;
/*
* jiffy_elapsed should not be a big value as minimum iops can be
* 1 then at max jiffy elapsed should be equivalent of 1 second as we
* will allow dispatch after 1 second and after that slice should
* have been trimmed.
*/
tmp = (u64)iops_limit * jiffy_elapsed;
do_div(tmp, HZ);
if (tmp > UINT_MAX)
io_allowed = UINT_MAX;
else
io_allowed = tmp;
return io_allowed;
}
static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed)
{
return mul_u64_u64_div_u64(bps_limit, (u64)jiffy_elapsed, (u64)HZ);
}
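Worked numbers for the two helpers above: with iops_limit = 100 and jiffy_elapsed = HZ/2 (half a second), calculate_io_allowed() returns 100 * (HZ/2) / HZ = 50 I/Os; with bps_limit = 1 MiB/s over the same window, calculate_bytes_allowed() returns mul_u64_u64_div_u64(1048576, HZ/2, HZ) = 524288 bytes.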
static void __tg_update_carryover(struct throtl_grp *tg, bool rw)
{
unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw];
u64 bps_limit = tg_bps_limit(tg, rw);
u32 iops_limit = tg_iops_limit(tg, rw);
/*
* If config is updated while bios are still throttled, calculate and
* accumulate how many bytes/ios are waited across changes. And
* carryover_bytes/ios will be used to calculate new wait time under new
* configuration.
*/
if (bps_limit != U64_MAX)
tg->carryover_bytes[rw] +=
calculate_bytes_allowed(bps_limit, jiffy_elapsed) -
tg->bytes_disp[rw];
if (iops_limit != UINT_MAX)
tg->carryover_ios[rw] +=
calculate_io_allowed(iops_limit, jiffy_elapsed) -
tg->io_disp[rw];
}
static void tg_update_carryover(struct throtl_grp *tg)
{
if (tg->service_queue.nr_queued[READ])
__tg_update_carryover(tg, READ);
if (tg->service_queue.nr_queued[WRITE])
__tg_update_carryover(tg, WRITE);
/* see comments in struct throtl_grp for meaning of these fields. */
throtl_log(&tg->service_queue, "%s: %llu %llu %u %u\n", __func__,
tg->carryover_bytes[READ], tg->carryover_bytes[WRITE],
tg->carryover_ios[READ], tg->carryover_ios[WRITE]);
}
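Continuing those numbers: if the limit is changed half a second into a slice during which the group has dispatched only 200 KiB of a 1 MiB/s budget while bios are still queued, __tg_update_carryover() adds 524288 - 204800 = 319488 bytes to carryover_bytes[rw], so the budget already earned under the old configuration is honoured when tg_within_bps_limit() recomputes the wait under the new one.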
static bool tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio,
u32 iops_limit, unsigned long *wait)
{ {
bool rw = bio_data_dir(bio); bool rw = bio_data_dir(bio);
unsigned int io_allowed; unsigned int io_allowed;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
u64 tmp;
if (iops_limit == UINT_MAX) { if (iops_limit == UINT_MAX) {
if (wait) if (wait)
@ -772,22 +838,8 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
/* Round up to the next throttle slice, wait time must be nonzero */ /* Round up to the next throttle slice, wait time must be nonzero */
jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed_rnd) +
/* tg->carryover_ios[rw];
* jiffy_elapsed_rnd should not be a big value as minimum iops can be
* 1 then at max jiffy elapsed should be equivalent of 1 second as we
* will allow dispatch after 1 second and after that slice should
* have been trimmed.
*/
tmp = (u64)iops_limit * jiffy_elapsed_rnd;
do_div(tmp, HZ);
if (tmp > UINT_MAX)
io_allowed = UINT_MAX;
else
io_allowed = tmp;
if (tg->io_disp[rw] + 1 <= io_allowed) { if (tg->io_disp[rw] + 1 <= io_allowed) {
if (wait) if (wait)
*wait = 0; *wait = 0;
@ -802,16 +854,16 @@ static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
return false; return false;
} }
static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio, static bool tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio,
u64 bps_limit, unsigned long *wait) u64 bps_limit, unsigned long *wait)
{ {
bool rw = bio_data_dir(bio); bool rw = bio_data_dir(bio);
u64 bytes_allowed, extra_bytes, tmp; u64 bytes_allowed, extra_bytes;
unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
unsigned int bio_size = throtl_bio_data_size(bio); unsigned int bio_size = throtl_bio_data_size(bio);
/* no need to throttle if this bio's bytes have been accounted */ /* no need to throttle if this bio's bytes have been accounted */
if (bps_limit == U64_MAX || bio_flagged(bio, BIO_THROTTLED)) { if (bps_limit == U64_MAX || bio_flagged(bio, BIO_BPS_THROTTLED)) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return true; return true;
@ -824,11 +876,8 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
jiffy_elapsed_rnd = tg->td->throtl_slice; jiffy_elapsed_rnd = tg->td->throtl_slice;
jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed_rnd) +
tmp = bps_limit * jiffy_elapsed_rnd; tg->carryover_bytes[rw];
do_div(tmp, HZ);
bytes_allowed = tmp;
if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) { if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
if (wait) if (wait)
*wait = 0; *wait = 0;
@ -889,7 +938,7 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
* slice and it should be extended instead. * slice and it should be extended instead.
*/ */
if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
throtl_start_new_slice(tg, rw); throtl_start_new_slice(tg, rw, true);
else { else {
if (time_before(tg->slice_end[rw], if (time_before(tg->slice_end[rw],
jiffies + tg->td->throtl_slice)) jiffies + tg->td->throtl_slice))
@ -897,8 +946,8 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
jiffies + tg->td->throtl_slice); jiffies + tg->td->throtl_slice);
} }
if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) && if (tg_within_bps_limit(tg, bio, bps_limit, &bps_wait) &&
tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) { tg_within_iops_limit(tg, bio, iops_limit, &iops_wait)) {
if (wait) if (wait)
*wait = 0; *wait = 0;
return true; return true;
@ -921,22 +970,13 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
unsigned int bio_size = throtl_bio_data_size(bio); unsigned int bio_size = throtl_bio_data_size(bio);
/* Charge the bio to the group */ /* Charge the bio to the group */
if (!bio_flagged(bio, BIO_THROTTLED)) { if (!bio_flagged(bio, BIO_BPS_THROTTLED)) {
tg->bytes_disp[rw] += bio_size; tg->bytes_disp[rw] += bio_size;
tg->last_bytes_disp[rw] += bio_size; tg->last_bytes_disp[rw] += bio_size;
} }
tg->io_disp[rw]++; tg->io_disp[rw]++;
tg->last_io_disp[rw]++; tg->last_io_disp[rw]++;
/*
* BIO_THROTTLED is used to prevent the same bio to be throttled
* more than once as a throttled bio will go through blk-throtl the
* second time when it eventually gets issued. Set it when a bio
* is being charged to a tg.
*/
if (!bio_flagged(bio, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
} }
/** /**
@ -990,9 +1030,9 @@ static void tg_update_disptime(struct throtl_grp *tg)
disptime = jiffies + min_wait; disptime = jiffies + min_wait;
/* Update dispatch time */ /* Update dispatch time */
throtl_dequeue_tg(tg); throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
tg->disptime = disptime; tg->disptime = disptime;
throtl_enqueue_tg(tg); tg_service_queue_add(tg);
/* see throtl_add_bio_tg() */ /* see throtl_add_bio_tg() */
tg->flags &= ~THROTL_TG_WAS_EMPTY; tg->flags &= ~THROTL_TG_WAS_EMPTY;
@ -1026,6 +1066,7 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
sq->nr_queued[rw]--; sq->nr_queued[rw]--;
throtl_charge_bio(tg, bio); throtl_charge_bio(tg, bio);
bio_set_flag(bio, BIO_BPS_THROTTLED);
/* /*
* If our parent is another tg, we just need to transfer @bio to * If our parent is another tg, we just need to transfer @bio to
@ -1101,13 +1142,13 @@ static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
if (time_before(jiffies, tg->disptime)) if (time_before(jiffies, tg->disptime))
break; break;
throtl_dequeue_tg(tg);
nr_disp += throtl_dispatch_tg(tg); nr_disp += throtl_dispatch_tg(tg);
sq = &tg->service_queue; sq = &tg->service_queue;
if (sq->nr_queued[0] || sq->nr_queued[1]) if (sq->nr_queued[READ] || sq->nr_queued[WRITE])
tg_update_disptime(tg); tg_update_disptime(tg);
else
throtl_dequeue_tg(tg);
if (nr_disp >= THROTL_QUANTUM) if (nr_disp >= THROTL_QUANTUM)
break; break;
@ -1321,8 +1362,8 @@ static void tg_conf_updated(struct throtl_grp *tg, bool global)
* that a group's limit are dropped suddenly and we don't want to * that a group's limit are dropped suddenly and we don't want to
* account recently dispatched IO with new low rate. * account recently dispatched IO with new low rate.
*/ */
throtl_start_new_slice(tg, READ); throtl_start_new_slice(tg, READ, false);
throtl_start_new_slice(tg, WRITE); throtl_start_new_slice(tg, WRITE, false);
if (tg->flags & THROTL_TG_PENDING) { if (tg->flags & THROTL_TG_PENDING) {
tg_update_disptime(tg); tg_update_disptime(tg);
@ -1350,6 +1391,7 @@ static ssize_t tg_set_conf(struct kernfs_open_file *of,
v = U64_MAX; v = U64_MAX;
tg = blkg_to_tg(ctx.blkg); tg = blkg_to_tg(ctx.blkg);
tg_update_carryover(tg);
if (is_u64) if (is_u64)
*(u64 *)((void *)tg + of_cft(of)->private) = v; *(u64 *)((void *)tg + of_cft(of)->private) = v;
@ -1536,6 +1578,7 @@ static ssize_t tg_set_limit(struct kernfs_open_file *of,
return ret; return ret;
tg = blkg_to_tg(ctx.blkg); tg = blkg_to_tg(ctx.blkg);
tg_update_carryover(tg);
v[0] = tg->bps_conf[READ][index]; v[0] = tg->bps_conf[READ][index];
v[1] = tg->bps_conf[WRITE][index]; v[1] = tg->bps_conf[WRITE][index];
@ -1673,6 +1716,41 @@ struct blkcg_policy blkcg_policy_throtl = {
.pd_free_fn = throtl_pd_free, .pd_free_fn = throtl_pd_free,
}; };
void blk_throtl_cancel_bios(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
struct cgroup_subsys_state *pos_css;
struct blkcg_gq *blkg;
spin_lock_irq(&q->queue_lock);
/*
* queue_lock is held, rcu lock is not needed here technically.
* However, rcu lock is still held to emphasize that following
* path need RCU protection and to prevent warning from lockdep.
*/
rcu_read_lock();
blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
struct throtl_grp *tg = blkg_to_tg(blkg);
struct throtl_service_queue *sq = &tg->service_queue;
/*
* Set the flag to make sure throtl_pending_timer_fn() won't
* stop until all throttled bios are dispatched.
*/
blkg_to_tg(blkg)->flags |= THROTL_TG_CANCELING;
/*
* Update disptime after setting the above flag to make sure
* throtl_select_dispatch() won't exit without dispatching.
*/
tg_update_disptime(tg);
throtl_schedule_pending_timer(sq, jiffies + 1);
}
rcu_read_unlock();
spin_unlock_irq(&q->queue_lock);
}
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
{ {
unsigned long rtime = jiffies, wtime = jiffies; unsigned long rtime = jiffies, wtime = jiffies;
@ -1777,39 +1855,6 @@ static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
return false; return false;
} }
void blk_throtl_cancel_bios(struct request_queue *q)
{
struct cgroup_subsys_state *pos_css;
struct blkcg_gq *blkg;
spin_lock_irq(&q->queue_lock);
/*
* queue_lock is held, rcu lock is not needed here technically.
* However, rcu lock is still held to emphasize that following
* path need RCU protection and to prevent warning from lockdep.
*/
rcu_read_lock();
blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) {
struct throtl_grp *tg = blkg_to_tg(blkg);
struct throtl_service_queue *sq = &tg->service_queue;
/*
* Set the flag to make sure throtl_pending_timer_fn() won't
* stop until all throttled bios are dispatched.
*/
blkg_to_tg(blkg)->flags |= THROTL_TG_CANCELING;
/*
* Update disptime after setting the above flag to make sure
* throtl_select_dispatch() won't exit without dispatching.
*/
tg_update_disptime(tg);
throtl_schedule_pending_timer(sq, jiffies + 1);
}
rcu_read_unlock();
spin_unlock_irq(&q->queue_lock);
}
static bool throtl_can_upgrade(struct throtl_data *td, static bool throtl_can_upgrade(struct throtl_data *td,
struct throtl_grp *this_tg) struct throtl_grp *this_tg)
{ {
@ -2005,7 +2050,6 @@ static void blk_throtl_update_idletime(struct throtl_grp *tg)
tg->checked_last_finish_time = last_finish_time; tg->checked_last_finish_time = last_finish_time;
} }
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static void throtl_update_latency_buckets(struct throtl_data *td) static void throtl_update_latency_buckets(struct throtl_data *td)
{ {
struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE]; struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
@ -2086,6 +2130,28 @@ static void throtl_update_latency_buckets(struct throtl_data *td)
static inline void throtl_update_latency_buckets(struct throtl_data *td) static inline void throtl_update_latency_buckets(struct throtl_data *td)
{ {
} }
static void blk_throtl_update_idletime(struct throtl_grp *tg)
{
}
static void throtl_downgrade_check(struct throtl_grp *tg)
{
}
static void throtl_upgrade_check(struct throtl_grp *tg)
{
}
static bool throtl_can_upgrade(struct throtl_data *td,
struct throtl_grp *this_tg)
{
return false;
}
static void throtl_upgrade_state(struct throtl_data *td)
{
}
#endif #endif
bool __blk_throtl_bio(struct bio *bio) bool __blk_throtl_bio(struct bio *bio)
@ -2159,8 +2225,10 @@ bool __blk_throtl_bio(struct bio *bio)
qn = &tg->qnode_on_parent[rw]; qn = &tg->qnode_on_parent[rw];
sq = sq->parent_sq; sq = sq->parent_sq;
tg = sq_to_tg(sq); tg = sq_to_tg(sq);
if (!tg) if (!tg) {
bio_set_flag(bio, BIO_BPS_THROTTLED);
goto out_unlock; goto out_unlock;
}
} }
/* out-of-limit, queue to @tg */ /* out-of-limit, queue to @tg */
@ -2189,8 +2257,6 @@ bool __blk_throtl_bio(struct bio *bio)
} }
out_unlock: out_unlock:
bio_set_flag(bio, BIO_THROTTLED);
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
if (throttled || !td->track_bio_latency) if (throttled || !td->track_bio_latency)
bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
@ -2286,8 +2352,9 @@ void blk_throtl_bio_endio(struct bio *bio)
} }
#endif #endif
int blk_throtl_init(struct request_queue *q) int blk_throtl_init(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue;
struct throtl_data *td; struct throtl_data *td;
int ret; int ret;
@ -2329,8 +2396,10 @@ int blk_throtl_init(struct request_queue *q)
return ret; return ret;
} }
void blk_throtl_exit(struct request_queue *q) void blk_throtl_exit(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue;
BUG_ON(!q->td); BUG_ON(!q->td);
del_timer_sync(&q->td->service_queue.pending_timer); del_timer_sync(&q->td->service_queue.pending_timer);
throtl_shutdown_wq(q); throtl_shutdown_wq(q);
@ -2340,8 +2409,9 @@ void blk_throtl_exit(struct request_queue *q)
kfree(q->td); kfree(q->td);
} }
void blk_throtl_register_queue(struct request_queue *q) void blk_throtl_register(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue;
struct throtl_data *td; struct throtl_data *td;
int i; int i;


@ -55,8 +55,7 @@ struct throtl_service_queue {
enum tg_state_flags { enum tg_state_flags {
THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */ THROTL_TG_PENDING = 1 << 0, /* on parent's pending tree */
THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */ THROTL_TG_WAS_EMPTY = 1 << 1, /* bio_lists[] became non-empty */
THROTL_TG_HAS_IOPS_LIMIT = 1 << 2, /* tg has iops limit */ THROTL_TG_CANCELING = 1 << 2, /* starts to cancel bio */
THROTL_TG_CANCELING = 1 << 3, /* starts to cancel bio */
}; };
enum { enum {
@ -99,7 +98,8 @@ struct throtl_grp {
unsigned int flags; unsigned int flags;
/* are there any throtl rules between this group and td? */ /* are there any throtl rules between this group and td? */
bool has_rules[2]; bool has_rules_bps[2];
bool has_rules_iops[2];
/* internally used bytes per second rate limits */ /* internally used bytes per second rate limits */
uint64_t bps[2][LIMIT_CNT]; uint64_t bps[2][LIMIT_CNT];
@ -121,6 +121,15 @@ struct throtl_grp {
uint64_t last_bytes_disp[2]; uint64_t last_bytes_disp[2];
unsigned int last_io_disp[2]; unsigned int last_io_disp[2];
/*
* The following two fields are updated when new configuration is
* submitted while some bios are still throttled, they record how many
* bytes/ios are waited already in previous configuration, and they will
* be used to calculate wait time under new configuration.
*/
uint64_t carryover_bytes[2];
unsigned int carryover_ios[2];
unsigned long last_check_time; unsigned long last_check_time;
unsigned long latency_target; /* us */ unsigned long latency_target; /* us */
@ -159,27 +168,37 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
* Internal throttling interface * Internal throttling interface
*/ */
#ifndef CONFIG_BLK_DEV_THROTTLING #ifndef CONFIG_BLK_DEV_THROTTLING
static inline int blk_throtl_init(struct request_queue *q) { return 0; } static inline int blk_throtl_init(struct gendisk *disk) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { } static inline void blk_throtl_exit(struct gendisk *disk) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { } static inline void blk_throtl_register(struct gendisk *disk) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; } static inline bool blk_throtl_bio(struct bio *bio) { return false; }
static inline void blk_throtl_cancel_bios(struct request_queue *q) { } static inline void blk_throtl_cancel_bios(struct gendisk *disk) { }
#else /* CONFIG_BLK_DEV_THROTTLING */ #else /* CONFIG_BLK_DEV_THROTTLING */
int blk_throtl_init(struct request_queue *q); int blk_throtl_init(struct gendisk *disk);
void blk_throtl_exit(struct request_queue *q); void blk_throtl_exit(struct gendisk *disk);
void blk_throtl_register_queue(struct request_queue *q); void blk_throtl_register(struct gendisk *disk);
bool __blk_throtl_bio(struct bio *bio); bool __blk_throtl_bio(struct bio *bio);
void blk_throtl_cancel_bios(struct request_queue *q); void blk_throtl_cancel_bios(struct gendisk *disk);
static inline bool blk_throtl_bio(struct bio *bio)
static inline bool blk_should_throtl(struct bio *bio)
{ {
struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg); struct throtl_grp *tg = blkg_to_tg(bio->bi_blkg);
int rw = bio_data_dir(bio);
/* no need to throttle bps any more if the bio has been throttled */ /* iops limit is always counted */
if (bio_flagged(bio, BIO_THROTTLED) && if (tg->has_rules_iops[rw])
!(tg->flags & THROTL_TG_HAS_IOPS_LIMIT)) return true;
return false;
if (!tg->has_rules[bio_data_dir(bio)]) if (tg->has_rules_bps[rw] && !bio_flagged(bio, BIO_BPS_THROTTLED))
return true;
return false;
}
static inline bool blk_throtl_bio(struct bio *bio)
{
if (!blk_should_throtl(bio))
return false; return false;
return __blk_throtl_bio(bio); return __blk_throtl_bio(bio);


@ -841,8 +841,11 @@ int wbt_init(struct request_queue *q)
rwb->last_comp = rwb->last_issue = jiffies; rwb->last_comp = rwb->last_issue = jiffies;
rwb->win_nsec = RWB_WINDOW_NSEC; rwb->win_nsec = RWB_WINDOW_NSEC;
rwb->enable_state = WBT_STATE_ON_DEFAULT; rwb->enable_state = WBT_STATE_ON_DEFAULT;
rwb->wc = 1; rwb->wc = test_bit(QUEUE_FLAG_WC, &q->queue_flags);
rwb->rq_depth.default_depth = RWB_DEF_DEPTH; rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
wbt_queue_depth_changed(&rwb->rqos);
/* /*
* Assign rwb and add the stats callback. * Assign rwb and add the stats callback.
@ -853,11 +856,6 @@ int wbt_init(struct request_queue *q)
blk_stat_add_callback(q, rwb->cb); blk_stat_add_callback(q, rwb->cb);
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
wbt_queue_depth_changed(&rwb->rqos);
wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
return 0; return 0;
err_free: err_free:


@ -63,13 +63,10 @@ bool blk_req_needs_zone_write_lock(struct request *rq)
if (!rq->q->disk->seq_zones_wlock) if (!rq->q->disk->seq_zones_wlock)
return false; return false;
switch (req_op(rq)) { if (bdev_op_is_zoned_write(rq->q->disk->part0, req_op(rq)))
case REQ_OP_WRITE_ZEROES:
case REQ_OP_WRITE:
return blk_rq_zone_is_seq(rq); return blk_rq_zone_is_seq(rq);
default:
return false; return false;
}
} }
EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock); EXPORT_SYMBOL_GPL(blk_req_needs_zone_write_lock);


@ -88,6 +88,13 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset; phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset; phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;
/*
* Merging adjacent physical pages may not work correctly under KMSAN
* if their metadata pages aren't adjacent. Just disable merging.
*/
if (IS_ENABLED(CONFIG_KMSAN))
return false;
if (addr1 + vec1->bv_len != addr2) if (addr1 + vec1->bv_len != addr2)
return false; return false;
if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page)) if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
@ -270,8 +277,7 @@ bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
void blk_insert_flush(struct request *rq); void blk_insert_flush(struct request *rq);
int elevator_switch_mq(struct request_queue *q, int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
struct elevator_type *new_e);
void elevator_exit(struct request_queue *q); void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent); int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q); void elv_unregister_queue(struct request_queue *q);
@ -325,6 +331,7 @@ void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio); bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio); enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
void blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void); int blk_dev_init(void);
/* /*
@ -389,9 +396,9 @@ static inline struct bio *blk_queue_bounce(struct bio *bio,
} }
#ifdef CONFIG_BLK_CGROUP_IOLATENCY #ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q); int blk_iolatency_init(struct gendisk *disk);
#else #else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; } static inline int blk_iolatency_init(struct gendisk *disk) { return 0; };
#endif #endif
#ifdef CONFIG_BLK_DEV_ZONED #ifdef CONFIG_BLK_DEV_ZONED


@ -588,7 +588,7 @@ void elv_unregister(struct elevator_type *e)
} }
EXPORT_SYMBOL_GPL(elv_unregister); EXPORT_SYMBOL_GPL(elv_unregister);
int elevator_switch_mq(struct request_queue *q, static int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e) struct elevator_type *new_e)
{ {
int ret; int ret;
@ -723,7 +723,7 @@ void elevator_init_mq(struct request_queue *q)
* need for the new one. this way we have a chance of going back to the old * need for the new one. this way we have a chance of going back to the old
* one, if the new one fails init for some reason. * one, if the new one fails init for some reason.
*/ */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{ {
int err; int err;


@ -410,9 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
* Otherwise just allocate the device numbers for both the whole device * Otherwise just allocate the device numbers for both the whole device
* and all partitions from the extended dev_t space. * and all partitions from the extended dev_t space.
*/ */
ret = -EINVAL;
if (disk->major) { if (disk->major) {
if (WARN_ON(!disk->minors)) if (WARN_ON(!disk->minors))
return -EINVAL; goto out_exit_elevator;
if (disk->minors > DISK_MAX_PARTS) { if (disk->minors > DISK_MAX_PARTS) {
pr_err("block: can't allocate more than %d partitions\n", pr_err("block: can't allocate more than %d partitions\n",
@ -420,14 +421,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
disk->minors = DISK_MAX_PARTS; disk->minors = DISK_MAX_PARTS;
} }
if (disk->first_minor + disk->minors > MINORMASK + 1) if (disk->first_minor + disk->minors > MINORMASK + 1)
return -EINVAL; goto out_exit_elevator;
} else { } else {
if (WARN_ON(disk->minors)) if (WARN_ON(disk->minors))
return -EINVAL; goto out_exit_elevator;
ret = blk_alloc_ext_minor(); ret = blk_alloc_ext_minor();
if (ret < 0) if (ret < 0)
return ret; goto out_exit_elevator;
disk->major = BLOCK_EXT_MAJOR; disk->major = BLOCK_EXT_MAJOR;
disk->first_minor = ret; disk->first_minor = ret;
} }
@ -507,6 +508,13 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
*/ */
dev_set_uevent_suppress(ddev, 0); dev_set_uevent_suppress(ddev, 0);
disk_uevent(disk, KOBJ_ADD); disk_uevent(disk, KOBJ_ADD);
} else {
/*
* Even if the block_device for a hidden gendisk is not
* registered, it needs to have a valid bd_dev so that the
* freeing of the dynamic major works.
*/
disk->part0->bd_dev = MKDEV(disk->major, disk->first_minor);
} }
disk_update_readahead(disk); disk_update_readahead(disk);
@ -519,6 +527,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
bdi_unregister(disk->bdi); bdi_unregister(disk->bdi);
out_unregister_queue: out_unregister_queue:
blk_unregister_queue(disk); blk_unregister_queue(disk);
rq_qos_exit(disk->queue);
out_put_slave_dir: out_put_slave_dir:
kobject_put(disk->slave_dir); kobject_put(disk->slave_dir);
out_put_holder_dir: out_put_holder_dir:
@ -533,6 +542,9 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
out_free_ext_minor: out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR) if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor); blk_free_ext_minor(disk->first_minor);
out_exit_elevator:
if (disk->queue->elevator)
elevator_exit(disk->queue);
return ret; return ret;
} }
EXPORT_SYMBOL(device_add_disk); EXPORT_SYMBOL(device_add_disk);
@ -602,7 +614,6 @@ void del_gendisk(struct gendisk *disk)
* Prevent new I/O from crossing bio_queue_enter(). * Prevent new I/O from crossing bio_queue_enter().
*/ */
blk_queue_start_drain(q); blk_queue_start_drain(q);
blk_mq_freeze_queue_wait(q);
if (!(disk->flags & GENHD_FL_HIDDEN)) { if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
@ -626,7 +637,9 @@ void del_gendisk(struct gendisk *disk)
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
device_del(disk_to_dev(disk)); device_del(disk_to_dev(disk));
blk_throtl_cancel_bios(disk->queue); blk_mq_freeze_queue_wait(q);
blk_throtl_cancel_bios(disk);
blk_sync_queue(q); blk_sync_queue(q);
blk_flush_integrity(); blk_flush_integrity();
@ -1150,7 +1163,8 @@ static void disk_release(struct device *dev)
!test_bit(GD_ADDED, &disk->state)) !test_bit(GD_ADDED, &disk->state))
blk_mq_exit_queue(disk->queue); blk_mq_exit_queue(disk->queue);
blkcg_exit_queue(disk->queue); blkcg_exit_disk(disk);
bioset_exit(&disk->bio_split); bioset_exit(&disk->bio_split);
disk_release_events(disk); disk_release_events(disk);
@ -1363,7 +1377,7 @@ struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL)) if (xa_insert(&disk->part_tbl, 0, disk->part0, GFP_KERNEL))
goto out_destroy_part_tbl; goto out_destroy_part_tbl;
if (blkcg_init_queue(q)) if (blkcg_init_disk(disk))
goto out_erase_part0; goto out_erase_part0;
rand_initialize_disk(disk); rand_initialize_disk(disk);


@ -39,7 +39,12 @@ enum opal_response_token {
#define FIRST_TPER_SESSION_NUM 4096 #define FIRST_TPER_SESSION_NUM 4096
#define TPER_SYNC_SUPPORTED 0x01 #define TPER_SYNC_SUPPORTED 0x01
/* FC_LOCKING features */
#define LOCKING_SUPPORTED_MASK 0x01
#define LOCKING_ENABLED_MASK 0x02
#define LOCKED_MASK 0x04
#define MBR_ENABLED_MASK 0x10 #define MBR_ENABLED_MASK 0x10
#define MBR_DONE_MASK 0x20
#define TINY_ATOM_DATA_MASK 0x3F #define TINY_ATOM_DATA_MASK 0x3F
#define TINY_ATOM_SIGNED 0x40 #define TINY_ATOM_SIGNED 0x40


@ -74,8 +74,7 @@ struct parsed_resp {
}; };
struct opal_dev { struct opal_dev {
bool supported; u32 flags;
bool mbr_enabled;
void *data; void *data;
sec_send_recv *send_recv; sec_send_recv *send_recv;
@ -88,8 +87,8 @@ struct opal_dev {
u64 lowest_lba; u64 lowest_lba;
size_t pos; size_t pos;
u8 cmd[IO_BUFFER_LENGTH]; u8 *cmd;
u8 resp[IO_BUFFER_LENGTH]; u8 *resp;
struct parsed_resp parsed; struct parsed_resp parsed;
size_t prev_d_len; size_t prev_d_len;
@ -280,6 +279,30 @@ static bool check_tper(const void *data)
return true; return true;
} }
static bool check_lcksuppt(const void *data)
{
const struct d0_locking_features *lfeat = data;
u8 sup_feat = lfeat->supported_features;
return !!(sup_feat & LOCKING_SUPPORTED_MASK);
}
static bool check_lckenabled(const void *data)
{
const struct d0_locking_features *lfeat = data;
u8 sup_feat = lfeat->supported_features;
return !!(sup_feat & LOCKING_ENABLED_MASK);
}
static bool check_locked(const void *data)
{
const struct d0_locking_features *lfeat = data;
u8 sup_feat = lfeat->supported_features;
return !!(sup_feat & LOCKED_MASK);
}
static bool check_mbrenabled(const void *data) static bool check_mbrenabled(const void *data)
{ {
const struct d0_locking_features *lfeat = data; const struct d0_locking_features *lfeat = data;
@ -288,6 +311,14 @@ static bool check_mbrenabled(const void *data)
return !!(sup_feat & MBR_ENABLED_MASK); return !!(sup_feat & MBR_ENABLED_MASK);
} }
static bool check_mbrdone(const void *data)
{
const struct d0_locking_features *lfeat = data;
u8 sup_feat = lfeat->supported_features;
return !!(sup_feat & MBR_DONE_MASK);
}
static bool check_sum(const void *data) static bool check_sum(const void *data)
{ {
const struct d0_single_user_mode *sum = data; const struct d0_single_user_mode *sum = data;
@ -435,7 +466,7 @@ static int opal_discovery0_end(struct opal_dev *dev)
u32 hlen = be32_to_cpu(hdr->length); u32 hlen = be32_to_cpu(hdr->length);
print_buffer(dev->resp, hlen); print_buffer(dev->resp, hlen);
dev->mbr_enabled = false; dev->flags &= OPAL_FL_SUPPORTED;
if (hlen > IO_BUFFER_LENGTH - sizeof(*hdr)) { if (hlen > IO_BUFFER_LENGTH - sizeof(*hdr)) {
pr_debug("Discovery length overflows buffer (%zu+%u)/%u\n", pr_debug("Discovery length overflows buffer (%zu+%u)/%u\n",
@ -461,7 +492,16 @@ static int opal_discovery0_end(struct opal_dev *dev)
check_geometry(dev, body); check_geometry(dev, body);
break; break;
case FC_LOCKING: case FC_LOCKING:
dev->mbr_enabled = check_mbrenabled(body->features); if (check_lcksuppt(body->features))
dev->flags |= OPAL_FL_LOCKING_SUPPORTED;
if (check_lckenabled(body->features))
dev->flags |= OPAL_FL_LOCKING_ENABLED;
if (check_locked(body->features))
dev->flags |= OPAL_FL_LOCKED;
if (check_mbrenabled(body->features))
dev->flags |= OPAL_FL_MBR_ENABLED;
if (check_mbrdone(body->features))
dev->flags |= OPAL_FL_MBR_DONE;
break; break;
case FC_ENTERPRISE: case FC_ENTERPRISE:
case FC_DATASTORE: case FC_DATASTORE:
@ -2109,7 +2149,8 @@ static int check_opal_support(struct opal_dev *dev)
mutex_lock(&dev->dev_lock); mutex_lock(&dev->dev_lock);
setup_opal_dev(dev); setup_opal_dev(dev);
ret = opal_discovery0_step(dev); ret = opal_discovery0_step(dev);
dev->supported = !ret; if (!ret)
dev->flags |= OPAL_FL_SUPPORTED;
mutex_unlock(&dev->dev_lock); mutex_unlock(&dev->dev_lock);
return ret; return ret;
@ -2134,6 +2175,8 @@ void free_opal_dev(struct opal_dev *dev)
return; return;
clean_opal_dev(dev); clean_opal_dev(dev);
kfree(dev->resp);
kfree(dev->cmd);
kfree(dev); kfree(dev);
} }
EXPORT_SYMBOL(free_opal_dev); EXPORT_SYMBOL(free_opal_dev);
@ -2146,17 +2189,40 @@ struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv)
if (!dev) if (!dev)
return NULL; return NULL;
/*
* Presumably DMA-able buffers must be cache-aligned. Kmalloc makes
* sure the allocated buffer is DMA-safe in that regard.
*/
dev->cmd = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->cmd)
goto err_free_dev;
dev->resp = kmalloc(IO_BUFFER_LENGTH, GFP_KERNEL);
if (!dev->resp)
goto err_free_cmd;
INIT_LIST_HEAD(&dev->unlk_lst); INIT_LIST_HEAD(&dev->unlk_lst);
mutex_init(&dev->dev_lock); mutex_init(&dev->dev_lock);
dev->flags = 0;
dev->data = data; dev->data = data;
dev->send_recv = send_recv; dev->send_recv = send_recv;
if (check_opal_support(dev) != 0) { if (check_opal_support(dev) != 0) {
pr_debug("Opal is not supported on this device\n"); pr_debug("Opal is not supported on this device\n");
kfree(dev); goto err_free_resp;
return NULL;
} }
return dev; return dev;
err_free_resp:
kfree(dev->resp);
err_free_cmd:
kfree(dev->cmd);
err_free_dev:
kfree(dev);
return NULL;
} }
EXPORT_SYMBOL(init_opal_dev); EXPORT_SYMBOL(init_opal_dev);
@ -2528,7 +2594,7 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
if (!dev) if (!dev)
return false; return false;
if (!dev->supported) if (!(dev->flags & OPAL_FL_SUPPORTED))
return false; return false;
mutex_lock(&dev->dev_lock); mutex_lock(&dev->dev_lock);
@ -2546,7 +2612,7 @@ bool opal_unlock_from_suspend(struct opal_dev *dev)
was_failure = true; was_failure = true;
} }
if (dev->mbr_enabled) { if (dev->flags & OPAL_FL_MBR_ENABLED) {
ret = __opal_set_mbr_done(dev, &suspend->unlk.session.opal_key); ret = __opal_set_mbr_done(dev, &suspend->unlk.session.opal_key);
if (ret) if (ret)
pr_debug("Failed to set MBR Done in S3 resume\n"); pr_debug("Failed to set MBR Done in S3 resume\n");
@ -2620,6 +2686,23 @@ static int opal_generic_read_write_table(struct opal_dev *dev,
return ret; return ret;
} }
static int opal_get_status(struct opal_dev *dev, void __user *data)
{
struct opal_status sts = {0};
/*
* check_opal_support() error is not fatal,
* !dev->supported is a valid condition
*/
if (!check_opal_support(dev))
sts.flags = dev->flags;
if (copy_to_user(data, &sts, sizeof(sts))) {
pr_debug("Error copying status to userspace\n");
return -EFAULT;
}
return 0;
}
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg) int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
{ {
void *p; void *p;
@ -2629,12 +2712,14 @@ int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
return -EACCES; return -EACCES;
if (!dev) if (!dev)
return -ENOTSUPP; return -ENOTSUPP;
if (!dev->supported) if (!(dev->flags & OPAL_FL_SUPPORTED))
return -ENOTSUPP; return -ENOTSUPP;
p = memdup_user(arg, _IOC_SIZE(cmd)); if (cmd & IOC_IN) {
if (IS_ERR(p)) p = memdup_user(arg, _IOC_SIZE(cmd));
return PTR_ERR(p); if (IS_ERR(p))
return PTR_ERR(p);
}
switch (cmd) { switch (cmd) {
case IOC_OPAL_SAVE: case IOC_OPAL_SAVE:
@ -2685,11 +2770,15 @@ int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *arg)
case IOC_OPAL_GENERIC_TABLE_RW: case IOC_OPAL_GENERIC_TABLE_RW:
ret = opal_generic_read_write_table(dev, p); ret = opal_generic_read_write_table(dev, p);
break; break;
case IOC_OPAL_GET_STATUS:
ret = opal_get_status(dev, arg);
break;
default: default:
break; break;
} }
kfree(p); if (cmd & IOC_IN)
kfree(p);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(sed_ioctl); EXPORT_SYMBOL_GPL(sed_ioctl);
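
The new IOC_OPAL_GET_STATUS command only copies dev->flags back to the caller, so it needs no Opal key material (the usual CAP_SYS_ADMIN check in sed_ioctl still applies). A minimal userspace sketch, assuming the uapi header exposes struct opal_status and the OPAL_FL_* bits used above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

int main(int argc, char **argv)
{
	struct opal_status sts = { 0 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);	/* e.g. /dev/nvme0n1 */
	if (fd < 0 || ioctl(fd, IOC_OPAL_GET_STATUS, &sts) < 0) {
		perror("IOC_OPAL_GET_STATUS");
		return 1;
	}
	printf("flags 0x%x, locked=%d, MBR done=%d\n", sts.flags,
	       !!(sts.flags & OPAL_FL_LOCKED),
	       !!(sts.flags & OPAL_FL_MBR_DONE));
	close(fd);
	return 0;
}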

View File

@ -43,7 +43,7 @@ config SYSTEM_TRUSTED_KEYRING
bool "Provide system-wide ring of trusted keys" bool "Provide system-wide ring of trusted keys"
depends on KEYS depends on KEYS
depends on ASYMMETRIC_KEY_TYPE depends on ASYMMETRIC_KEY_TYPE
depends on X509_CERTIFICATE_PARSER depends on X509_CERTIFICATE_PARSER = y
help help
Provide a system keyring to which trusted keys can be added. Keys in Provide a system keyring to which trusted keys can be added. Keys in
the keyring are considered to be trusted. Keys may be added at will the keyring are considered to be trusted. Keys may be added at will
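
Note the tightened dependency: with "depends on X509_CERTIFICATE_PARSER = y" the system trusted keyring can only be enabled when the X.509 parser is built into the kernel; a modular parser (=m) no longer satisfies the dependency.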

File diff suppressed because it is too large.

View File

@ -149,7 +149,7 @@ obj-$(CONFIG_CRYPTO_TEA) += tea.o
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o obj-$(CONFIG_CRYPTO_SEED) += seed.o
obj-$(CONFIG_CRYPTO_ARIA) += aria.o obj-$(CONFIG_CRYPTO_ARIA) += aria_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o obj-$(CONFIG_CRYPTO_CHACHA20) += chacha_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o obj-$(CONFIG_CRYPTO_DEFLATE) += deflate.o

View File

@ -120,6 +120,12 @@ static int akcipher_default_op(struct akcipher_request *req)
return -ENOSYS; return -ENOSYS;
} }
static int akcipher_default_set_key(struct crypto_akcipher *tfm,
const void *key, unsigned int keylen)
{
return -ENOSYS;
}
int crypto_register_akcipher(struct akcipher_alg *alg) int crypto_register_akcipher(struct akcipher_alg *alg)
{ {
struct crypto_alg *base = &alg->base; struct crypto_alg *base = &alg->base;
@ -132,6 +138,8 @@ int crypto_register_akcipher(struct akcipher_alg *alg)
alg->encrypt = akcipher_default_op; alg->encrypt = akcipher_default_op;
if (!alg->decrypt) if (!alg->decrypt)
alg->decrypt = akcipher_default_op; alg->decrypt = akcipher_default_op;
if (!alg->set_priv_key)
alg->set_priv_key = akcipher_default_set_key;
akcipher_prepare_alg(alg); akcipher_prepare_alg(alg);
return crypto_register_alg(base); return crypto_register_alg(base);

View File

@ -997,77 +997,6 @@ void crypto_inc(u8 *a, unsigned int size)
} }
EXPORT_SYMBOL_GPL(crypto_inc); EXPORT_SYMBOL_GPL(crypto_inc);
void __crypto_xor(u8 *dst, const u8 *src1, const u8 *src2, unsigned int len)
{
int relalign = 0;
if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
int size = sizeof(unsigned long);
int d = (((unsigned long)dst ^ (unsigned long)src1) |
((unsigned long)dst ^ (unsigned long)src2)) &
(size - 1);
relalign = d ? 1 << __ffs(d) : size;
/*
* If we care about alignment, process as many bytes as
* needed to advance dst and src to values whose alignments
* equal their relative alignment. This will allow us to
* process the remainder of the input using optimal strides.
*/
while (((unsigned long)dst & (relalign - 1)) && len > 0) {
*dst++ = *src1++ ^ *src2++;
len--;
}
}
while (IS_ENABLED(CONFIG_64BIT) && len >= 8 && !(relalign & 7)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u64 l = get_unaligned((u64 *)src1) ^
get_unaligned((u64 *)src2);
put_unaligned(l, (u64 *)dst);
} else {
*(u64 *)dst = *(u64 *)src1 ^ *(u64 *)src2;
}
dst += 8;
src1 += 8;
src2 += 8;
len -= 8;
}
while (len >= 4 && !(relalign & 3)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u32 l = get_unaligned((u32 *)src1) ^
get_unaligned((u32 *)src2);
put_unaligned(l, (u32 *)dst);
} else {
*(u32 *)dst = *(u32 *)src1 ^ *(u32 *)src2;
}
dst += 4;
src1 += 4;
src2 += 4;
len -= 4;
}
while (len >= 2 && !(relalign & 1)) {
if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
u16 l = get_unaligned((u16 *)src1) ^
get_unaligned((u16 *)src2);
put_unaligned(l, (u16 *)dst);
} else {
*(u16 *)dst = *(u16 *)src1 ^ *(u16 *)src2;
}
dst += 2;
src1 += 2;
src2 += 2;
len -= 2;
}
while (len--)
*dst++ = *src1++ ^ *src2++;
}
EXPORT_SYMBOL_GPL(__crypto_xor);
unsigned int crypto_alg_extsize(struct crypto_alg *alg) unsigned int crypto_alg_extsize(struct crypto_alg *alg)
{ {
return alg->cra_ctxsize + return alg->cra_ctxsize +

View File

@ -114,7 +114,7 @@ struct crypto_larval *crypto_larval_alloc(const char *name, u32 type, u32 mask)
larval->alg.cra_priority = -1; larval->alg.cra_priority = -1;
larval->alg.cra_destroy = crypto_larval_destroy; larval->alg.cra_destroy = crypto_larval_destroy;
strlcpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME); strscpy(larval->alg.cra_name, name, CRYPTO_MAX_ALG_NAME);
init_completion(&larval->completion); init_completion(&larval->completion);
return larval; return larval;
@ -321,7 +321,7 @@ struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
/* /*
* If the internal flag is set for a cipher, require a caller to * If the internal flag is set for a cipher, require a caller to
* to invoke the cipher with the internal flag to use that cipher. * invoke the cipher with the internal flag to use that cipher.
* Also, if a caller wants to allocate a cipher that may or may * Also, if a caller wants to allocate a cipher that may or may
* not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and * not be an internal cipher, use type | CRYPTO_ALG_INTERNAL and
* !(mask & CRYPTO_ALG_INTERNAL). * !(mask & CRYPTO_ALG_INTERNAL).

crypto/aria_generic.c (new file, 313 lines added)
View File

@ -0,0 +1,313 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Cryptographic API.
*
* ARIA Cipher Algorithm.
*
* Documentation of ARIA can be found in RFC 5794.
* Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
*
* Information for ARIA
* http://210.104.33.10/ARIA/index-e.html (English)
* http://seed.kisa.or.kr/ (Korean)
*
* Public domain version is distributed above.
*/
#include <crypto/aria.h>
static const u32 key_rc[20] = {
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0,
0xdb92371d, 0x2126e970, 0x03249775, 0x04e8c90e,
0x517cc1b7, 0x27220a94, 0xfe13abe8, 0xfa9a6ee0,
0x6db14acc, 0x9e21c820, 0xff28b1d5, 0xef5de2b0
};
static void aria_set_encrypt_key(struct aria_ctx *ctx, const u8 *in_key,
unsigned int key_len)
{
const __be32 *key = (const __be32 *)in_key;
u32 w0[4], w1[4], w2[4], w3[4];
u32 reg0, reg1, reg2, reg3;
const u32 *ck;
int rkidx = 0;
ck = &key_rc[(key_len - 16) / 2];
w0[0] = be32_to_cpu(key[0]);
w0[1] = be32_to_cpu(key[1]);
w0[2] = be32_to_cpu(key[2]);
w0[3] = be32_to_cpu(key[3]);
reg0 = w0[0] ^ ck[0];
reg1 = w0[1] ^ ck[1];
reg2 = w0[2] ^ ck[2];
reg3 = w0[3] ^ ck[3];
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
if (key_len > 16) {
w1[0] = be32_to_cpu(key[4]);
w1[1] = be32_to_cpu(key[5]);
if (key_len > 24) {
w1[2] = be32_to_cpu(key[6]);
w1[3] = be32_to_cpu(key[7]);
} else {
w1[2] = 0;
w1[3] = 0;
}
} else {
w1[0] = 0;
w1[1] = 0;
w1[2] = 0;
w1[3] = 0;
}
w1[0] ^= reg0;
w1[1] ^= reg1;
w1[2] ^= reg2;
w1[3] ^= reg3;
reg0 = w1[0];
reg1 = w1[1];
reg2 = w1[2];
reg3 = w1[3];
reg0 ^= ck[4];
reg1 ^= ck[5];
reg2 ^= ck[6];
reg3 ^= ck[7];
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
reg0 ^= w0[0];
reg1 ^= w0[1];
reg2 ^= w0[2];
reg3 ^= w0[3];
w2[0] = reg0;
w2[1] = reg1;
w2[2] = reg2;
w2[3] = reg3;
reg0 ^= ck[8];
reg1 ^= ck[9];
reg2 ^= ck[10];
reg3 ^= ck[11];
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
w3[0] = reg0 ^ w1[0];
w3[1] = reg1 ^ w1[1];
w3[2] = reg2 ^ w1[2];
w3[3] = reg3 ^ w1[3];
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 19);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 31);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 67);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 97);
if (key_len > 16) {
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w1, w2, 97);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w2, w3, 97);
if (key_len > 24) {
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w3, w0, 97);
rkidx++;
aria_gsrk(ctx->enc_key[rkidx], w0, w1, 109);
}
}
}
static void aria_set_decrypt_key(struct aria_ctx *ctx)
{
int i;
for (i = 0; i < 4; i++) {
ctx->dec_key[0][i] = ctx->enc_key[ctx->rounds][i];
ctx->dec_key[ctx->rounds][i] = ctx->enc_key[0][i];
}
for (i = 1; i < ctx->rounds; i++) {
ctx->dec_key[i][0] = aria_m(ctx->enc_key[ctx->rounds - i][0]);
ctx->dec_key[i][1] = aria_m(ctx->enc_key[ctx->rounds - i][1]);
ctx->dec_key[i][2] = aria_m(ctx->enc_key[ctx->rounds - i][2]);
ctx->dec_key[i][3] = aria_m(ctx->enc_key[ctx->rounds - i][3]);
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
aria_diff_byte(&ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
aria_diff_word(&ctx->dec_key[i][0], &ctx->dec_key[i][1],
&ctx->dec_key[i][2], &ctx->dec_key[i][3]);
}
}
int aria_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
if (key_len != 16 && key_len != 24 && key_len != 32)
return -EINVAL;
ctx->key_length = key_len;
ctx->rounds = (key_len + 32) / 4;
aria_set_encrypt_key(ctx, in_key, key_len);
aria_set_decrypt_key(ctx);
return 0;
}
EXPORT_SYMBOL_GPL(aria_set_key);
static void __aria_crypt(struct aria_ctx *ctx, u8 *out, const u8 *in,
u32 key[][ARIA_RD_KEY_WORDS])
{
const __be32 *src = (const __be32 *)in;
__be32 *dst = (__be32 *)out;
u32 reg0, reg1, reg2, reg3;
int rounds, rkidx = 0;
rounds = ctx->rounds;
reg0 = be32_to_cpu(src[0]);
reg1 = be32_to_cpu(src[1]);
reg2 = be32_to_cpu(src[2]);
reg3 = be32_to_cpu(src[3]);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
while ((rounds -= 2) > 0) {
aria_subst_diff_even(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
aria_subst_diff_odd(&reg0, &reg1, &reg2, &reg3);
aria_add_round_key(key[rkidx], &reg0, &reg1, &reg2, &reg3);
rkidx++;
}
reg0 = key[rkidx][0] ^ make_u32((u8)(x1[get_u8(reg0, 0)]),
(u8)(x2[get_u8(reg0, 1)] >> 8),
(u8)(s1[get_u8(reg0, 2)]),
(u8)(s2[get_u8(reg0, 3)]));
reg1 = key[rkidx][1] ^ make_u32((u8)(x1[get_u8(reg1, 0)]),
(u8)(x2[get_u8(reg1, 1)] >> 8),
(u8)(s1[get_u8(reg1, 2)]),
(u8)(s2[get_u8(reg1, 3)]));
reg2 = key[rkidx][2] ^ make_u32((u8)(x1[get_u8(reg2, 0)]),
(u8)(x2[get_u8(reg2, 1)] >> 8),
(u8)(s1[get_u8(reg2, 2)]),
(u8)(s2[get_u8(reg2, 3)]));
reg3 = key[rkidx][3] ^ make_u32((u8)(x1[get_u8(reg3, 0)]),
(u8)(x2[get_u8(reg3, 1)] >> 8),
(u8)(s1[get_u8(reg3, 2)]),
(u8)(s2[get_u8(reg3, 3)]));
dst[0] = cpu_to_be32(reg0);
dst[1] = cpu_to_be32(reg1);
dst[2] = cpu_to_be32(reg2);
dst[3] = cpu_to_be32(reg3);
}
void aria_encrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->enc_key);
}
EXPORT_SYMBOL_GPL(aria_encrypt);
void aria_decrypt(void *_ctx, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = (struct aria_ctx *)_ctx;
__aria_crypt(ctx, out, in, ctx->dec_key);
}
EXPORT_SYMBOL_GPL(aria_decrypt);
static void __aria_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->enc_key);
}
static void __aria_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
struct aria_ctx *ctx = crypto_tfm_ctx(tfm);
__aria_crypt(ctx, out, in, ctx->dec_key);
}
static struct crypto_alg aria_alg = {
.cra_name = "aria",
.cra_driver_name = "aria-generic",
.cra_priority = 100,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = ARIA_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aria_ctx),
.cra_alignmask = 3,
.cra_module = THIS_MODULE,
.cra_u = {
.cipher = {
.cia_min_keysize = ARIA_MIN_KEY_SIZE,
.cia_max_keysize = ARIA_MAX_KEY_SIZE,
.cia_setkey = aria_set_key,
.cia_encrypt = __aria_encrypt,
.cia_decrypt = __aria_decrypt
}
}
};
static int __init aria_init(void)
{
return crypto_register_alg(&aria_alg);
}
static void __exit aria_fini(void)
{
crypto_unregister_alg(&aria_alg);
}
subsys_initcall(aria_init);
module_exit(aria_fini);
MODULE_DESCRIPTION("ARIA Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
MODULE_ALIAS_CRYPTO("aria");
MODULE_ALIAS_CRYPTO("aria-generic");

View File

@ -37,7 +37,7 @@ static void makedata(int disks)
int i; int i;
for (i = 0; i < disks; i++) { for (i = 0; i < disks; i++) {
prandom_bytes(page_address(data[i]), PAGE_SIZE); get_random_bytes(page_address(data[i]), PAGE_SIZE);
dataptrs[i] = data[i]; dataptrs[i] = data[i];
dataoffs[i] = 0; dataoffs[i] = 0;
} }
@ -189,7 +189,7 @@ static int test(int disks, int *tests)
} }
static int raid6_test(void) static int __init raid6_test(void)
{ {
int err = 0; int err = 0;
int tests = 0; int tests = 0;
@ -236,7 +236,7 @@ static int raid6_test(void)
return 0; return 0;
} }
static void raid6_test_exit(void) static void __exit raid6_test_exit(void)
{ {
} }

View File

@ -72,12 +72,12 @@ static struct kpp_alg curve25519_alg = {
.max_size = curve25519_max_size, .max_size = curve25519_max_size,
}; };
static int curve25519_init(void) static int __init curve25519_init(void)
{ {
return crypto_register_kpp(&curve25519_alg); return crypto_register_kpp(&curve25519_alg);
} }
static void curve25519_exit(void) static void __exit curve25519_exit(void)
{ {
crypto_unregister_kpp(&curve25519_alg); crypto_unregister_kpp(&curve25519_alg);
} }

View File

@ -893,7 +893,7 @@ static struct crypto_template crypto_ffdhe_templates[] = {};
#endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */ #endif /* CONFIG_CRYPTO_DH_RFC7919_GROUPS */
static int dh_init(void) static int __init dh_init(void)
{ {
int err; int err;
@ -911,7 +911,7 @@ static int dh_init(void)
return 0; return 0;
} }
static void dh_exit(void) static void __exit dh_exit(void)
{ {
crypto_unregister_templates(crypto_ffdhe_templates, crypto_unregister_templates(crypto_ffdhe_templates,
ARRAY_SIZE(crypto_ffdhe_templates)); ARRAY_SIZE(crypto_ffdhe_templates));

View File

@ -1703,7 +1703,7 @@ static int drbg_init_hash_kernel(struct drbg_state *drbg)
static int drbg_fini_hash_kernel(struct drbg_state *drbg) static int drbg_fini_hash_kernel(struct drbg_state *drbg)
{ {
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; struct sdesc *sdesc = drbg->priv_data;
if (sdesc) { if (sdesc) {
crypto_free_shash(sdesc->shash.tfm); crypto_free_shash(sdesc->shash.tfm);
kfree_sensitive(sdesc); kfree_sensitive(sdesc);
@ -1715,7 +1715,7 @@ static int drbg_fini_hash_kernel(struct drbg_state *drbg)
static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg, static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
const unsigned char *key) const unsigned char *key)
{ {
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; struct sdesc *sdesc = drbg->priv_data;
crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg)); crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
} }
@ -1723,7 +1723,7 @@ static void drbg_kcapi_hmacsetkey(struct drbg_state *drbg,
static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval, static int drbg_kcapi_hash(struct drbg_state *drbg, unsigned char *outval,
const struct list_head *in) const struct list_head *in)
{ {
struct sdesc *sdesc = (struct sdesc *)drbg->priv_data; struct sdesc *sdesc = drbg->priv_data;
struct drbg_string *input = NULL; struct drbg_string *input = NULL;
crypto_shash_init(&sdesc->shash); crypto_shash_init(&sdesc->shash);
@ -1818,8 +1818,7 @@ static int drbg_init_sym_kernel(struct drbg_state *drbg)
static void drbg_kcapi_symsetkey(struct drbg_state *drbg, static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
const unsigned char *key) const unsigned char *key)
{ {
struct crypto_cipher *tfm = struct crypto_cipher *tfm = drbg->priv_data;
(struct crypto_cipher *)drbg->priv_data;
crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg))); crypto_cipher_setkey(tfm, key, (drbg_keylen(drbg)));
} }
@ -1827,8 +1826,7 @@ static void drbg_kcapi_symsetkey(struct drbg_state *drbg,
static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval, static int drbg_kcapi_sym(struct drbg_state *drbg, unsigned char *outval,
const struct drbg_string *in) const struct drbg_string *in)
{ {
struct crypto_cipher *tfm = struct crypto_cipher *tfm = drbg->priv_data;
(struct crypto_cipher *)drbg->priv_data;
/* there is only component in *in */ /* there is only component in *in */
BUG_ON(in->len < drbg_blocklen(drbg)); BUG_ON(in->len < drbg_blocklen(drbg));

View File

@ -200,7 +200,7 @@ static struct kpp_alg ecdh_nist_p384 = {
static bool ecdh_nist_p192_registered; static bool ecdh_nist_p192_registered;
static int ecdh_init(void) static int __init ecdh_init(void)
{ {
int ret; int ret;
@ -227,7 +227,7 @@ static int ecdh_init(void)
return ret; return ret;
} }
static void ecdh_exit(void) static void __exit ecdh_exit(void)
{ {
if (ecdh_nist_p192_registered) if (ecdh_nist_p192_registered)
crypto_unregister_kpp(&ecdh_nist_p192); crypto_unregister_kpp(&ecdh_nist_p192);

View File

@ -332,7 +332,7 @@ static struct akcipher_alg ecdsa_nist_p192 = {
}; };
static bool ecdsa_nist_p192_registered; static bool ecdsa_nist_p192_registered;
static int ecdsa_init(void) static int __init ecdsa_init(void)
{ {
int ret; int ret;
@ -359,7 +359,7 @@ static int ecdsa_init(void)
return ret; return ret;
} }
static void ecdsa_exit(void) static void __exit ecdsa_exit(void)
{ {
if (ecdsa_nist_p192_registered) if (ecdsa_nist_p192_registered)
crypto_unregister_akcipher(&ecdsa_nist_p192); crypto_unregister_akcipher(&ecdsa_nist_p192);

View File

@ -543,7 +543,7 @@ static int essiv_create(struct crypto_template *tmpl, struct rtattr **tb)
} }
/* record the driver name so we can instantiate this exact algo later */ /* record the driver name so we can instantiate this exact algo later */
strlcpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name, strscpy(ictx->shash_driver_name, hash_alg->base.cra_driver_name,
CRYPTO_MAX_ALG_NAME); CRYPTO_MAX_ALG_NAME);
/* Instance fields */ /* Instance fields */

View File

@ -327,7 +327,7 @@ static struct akcipher_alg rsa = {
}, },
}; };
static int rsa_init(void) static int __init rsa_init(void)
{ {
int err; int err;
@ -344,7 +344,7 @@ static int rsa_init(void)
return 0; return 0;
} }
static void rsa_exit(void) static void __exit rsa_exit(void)
{ {
crypto_unregister_template(&rsa_pkcs1pad_tmpl); crypto_unregister_template(&rsa_pkcs1pad_tmpl);
crypto_unregister_akcipher(&rsa); crypto_unregister_akcipher(&rsa);

View File

@ -441,12 +441,12 @@ static struct akcipher_alg sm2 = {
}, },
}; };
static int sm2_init(void) static int __init sm2_init(void)
{ {
return crypto_register_akcipher(&sm2); return crypto_register_akcipher(&sm2);
} }
static void sm2_exit(void) static void __exit sm2_exit(void)
{ {
crypto_unregister_akcipher(&sm2); crypto_unregister_akcipher(&sm2);
} }

View File

@ -66,17 +66,6 @@ static u32 num_mb = 8;
static unsigned int klen; static unsigned int klen;
static char *tvmem[TVMEMSIZE]; static char *tvmem[TVMEMSIZE];
static const char *check[] = {
"des", "md5", "des3_ede", "rot13", "sha1", "sha224", "sha256", "sm3",
"blowfish", "twofish", "serpent", "sha384", "sha512", "md4", "aes",
"cast6", "arc4", "michael_mic", "deflate", "crc32c", "tea", "xtea",
"khazad", "wp512", "wp384", "wp256", "xeta", "fcrypt",
"camellia", "seed", "rmd160", "aria",
"lzo", "lzo-rle", "cts", "sha3-224", "sha3-256", "sha3-384",
"sha3-512", "streebog256", "streebog512",
NULL
};
static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 }; static const int block_sizes[] = { 16, 64, 128, 256, 1024, 1420, 4096, 0 };
static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 }; static const int aead_sizes[] = { 16, 64, 256, 512, 1024, 1420, 4096, 8192, 0 };
@ -1454,18 +1443,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
false); false);
} }
static void test_available(void)
{
const char **name = check;
while (*name) {
printk("alg %s ", *name);
printk(crypto_has_alg(*name, 0, 0) ?
"found\n" : "not found\n");
name++;
}
}
static inline int tcrypt_test(const char *alg) static inline int tcrypt_test(const char *alg)
{ {
int ret; int ret;
@ -2228,6 +2205,13 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
NULL, 0, 16, 8, speed_template_16_24_32); NULL, 0, 16, 8, speed_template_16_24_32);
break; break;
case 229:
test_mb_aead_speed("gcm(aria)", ENCRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
test_mb_aead_speed("gcm(aria)", DECRYPT, sec, NULL, 0, 16, 8,
speed_template_16, num_mb);
break;
case 300: case 300:
if (alg) { if (alg) {
test_hash_speed(alg, sec, generic_hash_speed_template); test_hash_speed(alg, sec, generic_hash_speed_template);
@ -2648,6 +2632,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16); speed_template_16);
break; break;
case 519:
test_acipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32);
test_acipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
break;
case 600: case 600:
test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0, test_mb_skcipher_speed("ecb(aes)", ENCRYPT, sec, NULL, 0,
speed_template_16_24_32, num_mb); speed_template_16_24_32, num_mb);
@ -2860,9 +2855,17 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_8_32, num_mb); speed_template_8_32, num_mb);
break; break;
case 1000: case 610:
test_available(); test_mb_skcipher_speed("ecb(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ecb(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", ENCRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
test_mb_skcipher_speed("ctr(aria)", DECRYPT, sec, NULL, 0,
speed_template_16_32, num_mb);
break; break;
} }
return ret; return ret;
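
The new cases follow tcrypt's convention of selecting speed tests by mode number, so the ARIA templates can be exercised with e.g. mode=229 (gcm(aria) multibuffer AEAD), mode=519 (ecb/ctr(aria) async cipher) or mode=610 (ecb/ctr(aria) multibuffer), together with the existing sec= and num_mb= parameters; mode 1000 (test_available) is gone along with the static check[] list it walked.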

View File

@ -855,9 +855,9 @@ static int prepare_keybuf(const u8 *key, unsigned int ksize,
/* Generate a random length in range [0, max_len], but prefer smaller values */ /* Generate a random length in range [0, max_len], but prefer smaller values */
static unsigned int generate_random_length(unsigned int max_len) static unsigned int generate_random_length(unsigned int max_len)
{ {
unsigned int len = prandom_u32() % (max_len + 1); unsigned int len = prandom_u32_max(max_len + 1);
switch (prandom_u32() % 4) { switch (prandom_u32_max(4)) {
case 0: case 0:
return len % 64; return len % 64;
case 1: case 1:
@ -874,14 +874,14 @@ static void flip_random_bit(u8 *buf, size_t size)
{ {
size_t bitpos; size_t bitpos;
bitpos = prandom_u32() % (size * 8); bitpos = prandom_u32_max(size * 8);
buf[bitpos / 8] ^= 1 << (bitpos % 8); buf[bitpos / 8] ^= 1 << (bitpos % 8);
} }
/* Flip a random byte in the given nonempty data buffer */ /* Flip a random byte in the given nonempty data buffer */
static void flip_random_byte(u8 *buf, size_t size) static void flip_random_byte(u8 *buf, size_t size)
{ {
buf[prandom_u32() % size] ^= 0xff; buf[prandom_u32_max(size)] ^= 0xff;
} }
/* Sometimes make some random changes to the given nonempty data buffer */ /* Sometimes make some random changes to the given nonempty data buffer */
@ -891,15 +891,15 @@ static void mutate_buffer(u8 *buf, size_t size)
size_t i; size_t i;
/* Sometimes flip some bits */ /* Sometimes flip some bits */
if (prandom_u32() % 4 == 0) { if (prandom_u32_max(4) == 0) {
num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size * 8); num_flips = min_t(size_t, 1 << prandom_u32_max(8), size * 8);
for (i = 0; i < num_flips; i++) for (i = 0; i < num_flips; i++)
flip_random_bit(buf, size); flip_random_bit(buf, size);
} }
/* Sometimes flip some bytes */ /* Sometimes flip some bytes */
if (prandom_u32() % 4 == 0) { if (prandom_u32_max(4) == 0) {
num_flips = min_t(size_t, 1 << (prandom_u32() % 8), size); num_flips = min_t(size_t, 1 << prandom_u32_max(8), size);
for (i = 0; i < num_flips; i++) for (i = 0; i < num_flips; i++)
flip_random_byte(buf, size); flip_random_byte(buf, size);
} }
@ -915,11 +915,11 @@ static void generate_random_bytes(u8 *buf, size_t count)
if (count == 0) if (count == 0)
return; return;
switch (prandom_u32() % 8) { /* Choose a generation strategy */ switch (prandom_u32_max(8)) { /* Choose a generation strategy */
case 0: case 0:
case 1: case 1:
/* All the same byte, plus optional mutations */ /* All the same byte, plus optional mutations */
switch (prandom_u32() % 4) { switch (prandom_u32_max(4)) {
case 0: case 0:
b = 0x00; b = 0x00;
break; break;
@ -927,7 +927,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
b = 0xff; b = 0xff;
break; break;
default: default:
b = (u8)prandom_u32(); b = get_random_u8();
break; break;
} }
memset(buf, b, count); memset(buf, b, count);
@ -935,8 +935,8 @@ static void generate_random_bytes(u8 *buf, size_t count)
break; break;
case 2: case 2:
/* Ascending or descending bytes, plus optional mutations */ /* Ascending or descending bytes, plus optional mutations */
increment = (u8)prandom_u32(); increment = get_random_u8();
b = (u8)prandom_u32(); b = get_random_u8();
for (i = 0; i < count; i++, b += increment) for (i = 0; i < count; i++, b += increment)
buf[i] = b; buf[i] = b;
mutate_buffer(buf, count); mutate_buffer(buf, count);
@ -944,7 +944,7 @@ static void generate_random_bytes(u8 *buf, size_t count)
default: default:
/* Fully random bytes */ /* Fully random bytes */
for (i = 0; i < count; i++) for (i = 0; i < count; i++)
buf[i] = (u8)prandom_u32(); buf[i] = get_random_u8();
} }
} }
@ -959,24 +959,24 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
unsigned int this_len; unsigned int this_len;
const char *flushtype_str; const char *flushtype_str;
if (div == &divs[max_divs - 1] || prandom_u32() % 2 == 0) if (div == &divs[max_divs - 1] || prandom_u32_max(2) == 0)
this_len = remaining; this_len = remaining;
else else
this_len = 1 + (prandom_u32() % remaining); this_len = 1 + prandom_u32_max(remaining);
div->proportion_of_total = this_len; div->proportion_of_total = this_len;
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
div->offset = (PAGE_SIZE - 128) + (prandom_u32() % 128); div->offset = (PAGE_SIZE - 128) + prandom_u32_max(128);
else if (prandom_u32() % 2 == 0) else if (prandom_u32_max(2) == 0)
div->offset = prandom_u32() % 32; div->offset = prandom_u32_max(32);
else else
div->offset = prandom_u32() % PAGE_SIZE; div->offset = prandom_u32_max(PAGE_SIZE);
if (prandom_u32() % 8 == 0) if (prandom_u32_max(8) == 0)
div->offset_relative_to_alignmask = true; div->offset_relative_to_alignmask = true;
div->flush_type = FLUSH_TYPE_NONE; div->flush_type = FLUSH_TYPE_NONE;
if (gen_flushes) { if (gen_flushes) {
switch (prandom_u32() % 4) { switch (prandom_u32_max(4)) {
case 0: case 0:
div->flush_type = FLUSH_TYPE_REIMPORT; div->flush_type = FLUSH_TYPE_REIMPORT;
break; break;
@ -988,7 +988,7 @@ static char *generate_random_sgl_divisions(struct test_sg_division *divs,
if (div->flush_type != FLUSH_TYPE_NONE && if (div->flush_type != FLUSH_TYPE_NONE &&
!(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && !(req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
prandom_u32() % 2 == 0) prandom_u32_max(2) == 0)
div->nosimd = true; div->nosimd = true;
switch (div->flush_type) { switch (div->flush_type) {
@ -1035,7 +1035,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "random:"); p += scnprintf(p, end - p, "random:");
switch (prandom_u32() % 4) { switch (prandom_u32_max(4)) {
case 0: case 0:
case 1: case 1:
cfg->inplace_mode = OUT_OF_PLACE; cfg->inplace_mode = OUT_OF_PLACE;
@ -1050,12 +1050,12 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
break; break;
} }
if (prandom_u32() % 2 == 0) { if (prandom_u32_max(2) == 0) {
cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP; cfg->req_flags |= CRYPTO_TFM_REQ_MAY_SLEEP;
p += scnprintf(p, end - p, " may_sleep"); p += scnprintf(p, end - p, " may_sleep");
} }
switch (prandom_u32() % 4) { switch (prandom_u32_max(4)) {
case 0: case 0:
cfg->finalization_type = FINALIZATION_TYPE_FINAL; cfg->finalization_type = FINALIZATION_TYPE_FINAL;
p += scnprintf(p, end - p, " use_final"); p += scnprintf(p, end - p, " use_final");
@ -1071,7 +1071,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
} }
if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) && if (!(cfg->req_flags & CRYPTO_TFM_REQ_MAY_SLEEP) &&
prandom_u32() % 2 == 0) { prandom_u32_max(2) == 0) {
cfg->nosimd = true; cfg->nosimd = true;
p += scnprintf(p, end - p, " nosimd"); p += scnprintf(p, end - p, " nosimd");
} }
@ -1084,7 +1084,7 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
cfg->req_flags); cfg->req_flags);
p += scnprintf(p, end - p, "]"); p += scnprintf(p, end - p, "]");
if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32() % 2 == 0) { if (cfg->inplace_mode == OUT_OF_PLACE && prandom_u32_max(2) == 0) {
p += scnprintf(p, end - p, " dst_divs=["); p += scnprintf(p, end - p, " dst_divs=[");
p = generate_random_sgl_divisions(cfg->dst_divs, p = generate_random_sgl_divisions(cfg->dst_divs,
ARRAY_SIZE(cfg->dst_divs), ARRAY_SIZE(cfg->dst_divs),
@ -1093,13 +1093,13 @@ static void generate_random_testvec_config(struct testvec_config *cfg,
p += scnprintf(p, end - p, "]"); p += scnprintf(p, end - p, "]");
} }
if (prandom_u32() % 2 == 0) { if (prandom_u32_max(2) == 0) {
cfg->iv_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); cfg->iv_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset); p += scnprintf(p, end - p, " iv_offset=%u", cfg->iv_offset);
} }
if (prandom_u32() % 2 == 0) { if (prandom_u32_max(2) == 0) {
cfg->key_offset = 1 + (prandom_u32() % MAX_ALGAPI_ALIGNMASK); cfg->key_offset = 1 + prandom_u32_max(MAX_ALGAPI_ALIGNMASK);
p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset); p += scnprintf(p, end - p, " key_offset=%u", cfg->key_offset);
} }
@ -1652,8 +1652,8 @@ static void generate_random_hash_testvec(struct shash_desc *desc,
vec->ksize = 0; vec->ksize = 0;
if (maxkeysize) { if (maxkeysize) {
vec->ksize = maxkeysize; vec->ksize = maxkeysize;
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
vec->ksize = 1 + (prandom_u32() % maxkeysize); vec->ksize = 1 + prandom_u32_max(maxkeysize);
generate_random_bytes((u8 *)vec->key, vec->ksize); generate_random_bytes((u8 *)vec->key, vec->ksize);
vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key, vec->setkey_error = crypto_shash_setkey(desc->tfm, vec->key,
@ -2218,13 +2218,13 @@ static void mutate_aead_message(struct aead_testvec *vec, bool aad_iv,
const unsigned int aad_tail_size = aad_iv ? ivsize : 0; const unsigned int aad_tail_size = aad_iv ? ivsize : 0;
const unsigned int authsize = vec->clen - vec->plen; const unsigned int authsize = vec->clen - vec->plen;
if (prandom_u32() % 2 == 0 && vec->alen > aad_tail_size) { if (prandom_u32_max(2) == 0 && vec->alen > aad_tail_size) {
/* Mutate the AAD */ /* Mutate the AAD */
flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size); flip_random_bit((u8 *)vec->assoc, vec->alen - aad_tail_size);
if (prandom_u32() % 2 == 0) if (prandom_u32_max(2) == 0)
return; return;
} }
if (prandom_u32() % 2 == 0) { if (prandom_u32_max(2) == 0) {
/* Mutate auth tag (assuming it's at the end of ciphertext) */ /* Mutate auth tag (assuming it's at the end of ciphertext) */
flip_random_bit((u8 *)vec->ctext + vec->plen, authsize); flip_random_bit((u8 *)vec->ctext + vec->plen, authsize);
} else { } else {
@ -2249,7 +2249,7 @@ static void generate_aead_message(struct aead_request *req,
const unsigned int ivsize = crypto_aead_ivsize(tfm); const unsigned int ivsize = crypto_aead_ivsize(tfm);
const unsigned int authsize = vec->clen - vec->plen; const unsigned int authsize = vec->clen - vec->plen;
const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) && const bool inauthentic = (authsize >= MIN_COLLISION_FREE_AUTHSIZE) &&
(prefer_inauthentic || prandom_u32() % 4 == 0); (prefer_inauthentic || prandom_u32_max(4) == 0);
/* Generate the AAD. */ /* Generate the AAD. */
generate_random_bytes((u8 *)vec->assoc, vec->alen); generate_random_bytes((u8 *)vec->assoc, vec->alen);
@ -2257,7 +2257,7 @@ static void generate_aead_message(struct aead_request *req,
/* Avoid implementation-defined behavior. */ /* Avoid implementation-defined behavior. */
memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize); memcpy((u8 *)vec->assoc + vec->alen - ivsize, vec->iv, ivsize);
if (inauthentic && prandom_u32() % 2 == 0) { if (inauthentic && prandom_u32_max(2) == 0) {
/* Generate a random ciphertext. */ /* Generate a random ciphertext. */
generate_random_bytes((u8 *)vec->ctext, vec->clen); generate_random_bytes((u8 *)vec->ctext, vec->clen);
} else { } else {
@ -2321,8 +2321,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize; vec->klen = maxkeysize;
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
vec->klen = prandom_u32() % (maxkeysize + 1); vec->klen = prandom_u32_max(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen); generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen); vec->setkey_error = crypto_aead_setkey(tfm, vec->key, vec->klen);
@ -2331,8 +2331,8 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* Tag length: in [0, maxauthsize], but usually choose maxauthsize */ /* Tag length: in [0, maxauthsize], but usually choose maxauthsize */
authsize = maxauthsize; authsize = maxauthsize;
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
authsize = prandom_u32() % (maxauthsize + 1); authsize = prandom_u32_max(maxauthsize + 1);
if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE) if (prefer_inauthentic && authsize < MIN_COLLISION_FREE_AUTHSIZE)
authsize = MIN_COLLISION_FREE_AUTHSIZE; authsize = MIN_COLLISION_FREE_AUTHSIZE;
if (WARN_ON(authsize > maxdatasize)) if (WARN_ON(authsize > maxdatasize))
@ -2342,7 +2342,7 @@ static void generate_random_aead_testvec(struct aead_request *req,
/* AAD, plaintext, and ciphertext lengths */ /* AAD, plaintext, and ciphertext lengths */
total_len = generate_random_length(maxdatasize); total_len = generate_random_length(maxdatasize);
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
vec->alen = 0; vec->alen = 0;
else else
vec->alen = generate_random_length(total_len); vec->alen = generate_random_length(total_len);
@ -2958,8 +2958,8 @@ static void generate_random_cipher_testvec(struct skcipher_request *req,
/* Key: length in [0, maxkeysize], but usually choose maxkeysize */ /* Key: length in [0, maxkeysize], but usually choose maxkeysize */
vec->klen = maxkeysize; vec->klen = maxkeysize;
if (prandom_u32() % 4 == 0) if (prandom_u32_max(4) == 0)
vec->klen = prandom_u32() % (maxkeysize + 1); vec->klen = prandom_u32_max(maxkeysize + 1);
generate_random_bytes((u8 *)vec->key, vec->klen); generate_random_bytes((u8 *)vec->key, vec->klen);
vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen); vec->setkey_error = crypto_skcipher_setkey(tfm, vec->key, vec->klen);
@ -3322,7 +3322,7 @@ static int test_comp(struct crypto_comp *tfm,
} }
static int test_acomp(struct crypto_acomp *tfm, static int test_acomp(struct crypto_acomp *tfm,
const struct comp_testvec *ctemplate, const struct comp_testvec *ctemplate,
const struct comp_testvec *dtemplate, const struct comp_testvec *dtemplate,
int ctcount, int dtcount) int ctcount, int dtcount)
{ {
@ -3417,6 +3417,21 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out; goto out;
} }
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
crypto_init_wait(&wait);
sg_init_one(&src, input_vec, ilen);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_compress(req), &wait);
if (ret) {
pr_err("alg: acomp: compression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec); kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
@ -3478,6 +3493,20 @@ static int test_acomp(struct crypto_acomp *tfm,
goto out; goto out;
} }
#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
crypto_init_wait(&wait);
acomp_request_set_params(req, &src, NULL, ilen, 0);
ret = crypto_wait_req(crypto_acomp_decompress(req), &wait);
if (ret) {
pr_err("alg: acomp: decompression failed on NULL dst buffer test %d for %s: ret=%d\n",
i + 1, algo, -ret);
kfree(input_vec);
acomp_request_free(req);
goto out;
}
#endif
kfree(input_vec); kfree(input_vec);
acomp_request_free(req); acomp_request_free(req);
} }
@ -5801,8 +5830,11 @@ int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
driver, alg, driver, alg,
fips_enabled ? "fips" : "panic_on_fail"); fips_enabled ? "fips" : "panic_on_fail");
} }
WARN(1, "alg: self-tests for %s (%s) failed (rc=%d)", pr_warn("alg: self-tests for %s using %s failed (rc=%d)",
driver, alg, rc); alg, driver, rc);
WARN(rc != -ENOENT,
"alg: self-tests for %s using %s failed (rc=%d)",
alg, driver, rc);
} else { } else {
if (fips_enabled) if (fips_enabled)
pr_info("alg: self-tests for %s (%s) passed\n", pr_info("alg: self-tests for %s (%s) passed\n",

View File

@ -175,6 +175,7 @@ obj-$(CONFIG_USB4) += thunderbolt/
obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/ obj-$(CONFIG_CORESIGHT) += hwtracing/coresight/
obj-y += hwtracing/intel_th/ obj-y += hwtracing/intel_th/
obj-$(CONFIG_STM) += hwtracing/stm/ obj-$(CONFIG_STM) += hwtracing/stm/
obj-$(CONFIG_HISI_PTT) += hwtracing/ptt/
obj-y += android/ obj-y += android/
obj-$(CONFIG_NVMEM) += nvmem/ obj-$(CONFIG_NVMEM) += nvmem/
obj-$(CONFIG_FPGA) += fpga/ obj-$(CONFIG_FPGA) += fpga/

View File

@ -1778,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc)
{ {
unsigned long flags; unsigned long flags;
if (!speakup_console[vc->vc_num] || spk_parked) if (!speakup_console[vc->vc_num] || spk_parked || !synth)
return; return;
if (!spin_trylock_irqsave(&speakup_info.spinlock, flags)) if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
/* Speakup output, discard */ /* Speakup output, discard */

View File

@ -27,6 +27,7 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } }, { INFLECTION, .u.n = {"INFLECTION %d\n", 8, 0, 16, 0, 0, NULL } },
{ VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } }, { VOL, .u.n = {"VOL %d\n", 8, 0, 16, 0, 0, NULL } },
{ TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } }, { TONE, .u.n = {"TONE %d\n", 8, 0, 16, 0, 0, NULL } },
{ PUNCT, .u.n = {"PUNCT %d\n", 0, 0, 3, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } }, { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR V_LAST_VAR
}; };
@ -42,6 +43,8 @@ static struct kobj_attribute pitch_attribute =
__ATTR(pitch, 0644, spk_var_show, spk_var_store); __ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute = static struct kobj_attribute inflection_attribute =
__ATTR(inflection, 0644, spk_var_show, spk_var_store); __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
__ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute = static struct kobj_attribute rate_attribute =
__ATTR(rate, 0644, spk_var_show, spk_var_store); __ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute tone_attribute = static struct kobj_attribute tone_attribute =
@ -69,6 +72,7 @@ static struct attribute *synth_attrs[] = {
&caps_stop_attribute.attr, &caps_stop_attribute.attr,
&pitch_attribute.attr, &pitch_attribute.attr,
&inflection_attribute.attr, &inflection_attribute.attr,
&punct_attribute.attr,
&rate_attribute.attr, &rate_attribute.attr,
&tone_attribute.attr, &tone_attribute.attr,
&vol_attribute.attr, &vol_attribute.attr,

View File

@ -26,6 +26,7 @@
static int softsynth_probe(struct spk_synth *synth); static int softsynth_probe(struct spk_synth *synth);
static void softsynth_release(struct spk_synth *synth); static void softsynth_release(struct spk_synth *synth);
static int softsynth_is_alive(struct spk_synth *synth); static int softsynth_is_alive(struct spk_synth *synth);
static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var);
static unsigned char get_index(struct spk_synth *synth); static unsigned char get_index(struct spk_synth *synth);
static struct miscdevice synth_device, synthu_device; static struct miscdevice synth_device, synthu_device;
@ -33,6 +34,9 @@ static int init_pos;
static int misc_registered; static int misc_registered;
static struct var_t vars[] = { static struct var_t vars[] = {
/* DIRECT is put first so that module_param_named can access it easily */
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
{ CAPS_START, .u.s = {"\x01+3p" } }, { CAPS_START, .u.s = {"\x01+3p" } },
{ CAPS_STOP, .u.s = {"\x01-3p" } }, { CAPS_STOP, .u.s = {"\x01-3p" } },
{ PAUSE, .u.n = {"\x01P" } }, { PAUSE, .u.n = {"\x01P" } },
@ -41,10 +45,9 @@ static struct var_t vars[] = {
{ INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } }, { INFLECTION, .u.n = {"\x01%dr", 5, 0, 9, 0, 0, NULL } },
{ VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } }, { VOL, .u.n = {"\x01%dv", 5, 0, 9, 0, 0, NULL } },
{ TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } }, { TONE, .u.n = {"\x01%dx", 1, 0, 2, 0, 0, NULL } },
{ PUNCT, .u.n = {"\x01%db", 0, 0, 2, 0, 0, NULL } }, { PUNCT, .u.n = {"\x01%db", 0, 0, 3, 0, 0, NULL } },
{ VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } }, { VOICE, .u.n = {"\x01%do", 0, 0, 7, 0, 0, NULL } },
{ FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } }, { FREQUENCY, .u.n = {"\x01%df", 5, 0, 9, 0, 0, NULL } },
{ DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
V_LAST_VAR V_LAST_VAR
}; };
@ -133,7 +136,7 @@ static struct spk_synth synth_soft = {
.catch_up = NULL, .catch_up = NULL,
.flush = NULL, .flush = NULL,
.is_alive = softsynth_is_alive, .is_alive = softsynth_is_alive,
.synth_adjust = NULL, .synth_adjust = softsynth_adjust,
.read_buff_add = NULL, .read_buff_add = NULL,
.get_index = get_index, .get_index = get_index,
.indexing = { .indexing = {
@ -426,9 +429,32 @@ static int softsynth_is_alive(struct spk_synth *synth)
return 0; return 0;
} }
static int softsynth_adjust(struct spk_synth *synth, struct st_var_header *var)
{
struct st_var_header *punc_level_var;
struct var_t *var_data;
if (var->var_id != PUNC_LEVEL)
return 0;
/* We want to set the speech synthesis punctuation level
* accordingly, so it properly tunes speaking A_PUNC characters */
var_data = var->data;
if (!var_data)
return 0;
punc_level_var = spk_get_var_header(PUNCT);
if (!punc_level_var)
return 0;
spk_set_num_var(var_data->u.n.value, punc_level_var, E_SET);
return 1;
}
module_param_named(start, synth_soft.startup, short, 0444); module_param_named(start, synth_soft.startup, short, 0444);
module_param_named(direct, vars[0].u.n.default_val, int, 0444);
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded."); MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");
module_spk_synth(synth_soft); module_spk_synth(synth_soft);
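
Two user-visible effects follow from this hunk: vars[0] is now DIRECT, so the new speakup_soft.direct module parameter can preset direct mode at load time, and softsynth_adjust() forwards PUNC_LEVEL updates into the synthesizer's own PUNCT variable so the configured punctuation level is actually spoken.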

View File

@ -195,7 +195,7 @@ struct spk_synth {
void (*catch_up)(struct spk_synth *synth); void (*catch_up)(struct spk_synth *synth);
void (*flush)(struct spk_synth *synth); void (*flush)(struct spk_synth *synth);
int (*is_alive)(struct spk_synth *synth); int (*is_alive)(struct spk_synth *synth);
int (*synth_adjust)(struct st_var_header *var); int (*synth_adjust)(struct spk_synth *synth, struct st_var_header *var);
void (*read_buff_add)(u_char c); void (*read_buff_add)(u_char c);
unsigned char (*get_index)(struct spk_synth *synth); unsigned char (*get_index)(struct spk_synth *synth);
struct synth_indexing indexing; struct synth_indexing indexing;

View File

@ -54,7 +54,7 @@ static inline int oops(const char *msg, const char *info)
static inline struct st_key *hash_name(char *name) static inline struct st_key *hash_name(char *name)
{ {
u_char *pn = (u_char *)name; unsigned char *pn = (unsigned char *)name;
int hash = 0; int hash = 0;
while (*pn) { while (*pn) {

View File

@ -138,6 +138,7 @@ struct st_var_header *spk_get_var_header(enum var_id_t var_id)
return NULL; return NULL;
return p_header; return p_header;
} }
EXPORT_SYMBOL_GPL(spk_get_var_header);
struct st_var_header *spk_var_header_by_name(const char *name) struct st_var_header *spk_var_header_by_name(const char *name)
{ {
@ -221,15 +222,17 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
*p_val = val; *p_val = val;
if (var->var_id == PUNC_LEVEL) { if (var->var_id == PUNC_LEVEL) {
spk_punc_mask = spk_punc_masks[val]; spk_punc_mask = spk_punc_masks[val];
return 0;
} }
if (var_data->u.n.multiplier != 0) if (var_data->u.n.multiplier != 0)
val *= var_data->u.n.multiplier; val *= var_data->u.n.multiplier;
val += var_data->u.n.offset; val += var_data->u.n.offset;
if (var->var_id < FIRST_SYNTH_VAR || !synth)
if (!synth)
return 0;
if (synth->synth_adjust && synth->synth_adjust(synth, var))
return 0;
if (var->var_id < FIRST_SYNTH_VAR)
return 0; return 0;
if (synth->synth_adjust)
return synth->synth_adjust(var);
if (!var_data->u.n.synth_fmt) if (!var_data->u.n.synth_fmt)
return 0; return 0;
@ -245,6 +248,7 @@ int spk_set_num_var(int input, struct st_var_header *var, int how)
synth_printf("%s", cp); synth_printf("%s", cp);
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(spk_set_num_var);
int spk_set_string_var(const char *page, struct st_var_header *var, int len) int spk_set_string_var(const char *page, struct st_var_header *var, int len)
{ {

View File

@ -27,9 +27,6 @@ menuconfig ACPI
Management (APM) specification. If both ACPI and APM support Management (APM) specification. If both ACPI and APM support
are configured, ACPI is used. are configured, ACPI is used.
The project home page for the Linux ACPI subsystem is here:
<https://01.org/linux-acpi>
Linux support for ACPI is based on Intel Corporation's ACPI Linux support for ACPI is based on Intel Corporation's ACPI
Component Architecture (ACPI CA). For more information on the Component Architecture (ACPI CA). For more information on the
ACPI CA, see: ACPI CA, see:
@ -212,6 +209,7 @@ config ACPI_VIDEO
tristate "Video" tristate "Video"
depends on BACKLIGHT_CLASS_DEVICE depends on BACKLIGHT_CLASS_DEVICE
depends on INPUT depends on INPUT
depends on ACPI_WMI || !X86
select THERMAL select THERMAL
help help
This driver implements the ACPI Extensions For Display Adapters This driver implements the ACPI Extensions For Display Adapters
@ -347,7 +345,6 @@ config ACPI_CUSTOM_DSDT_FILE
depends on !STANDALONE depends on !STANDALONE
help help
This option supports a custom DSDT by linking it into the kernel. This option supports a custom DSDT by linking it into the kernel.
See Documentation/admin-guide/acpi/dsdt-override.rst
Enter the full path name to the file which includes the AmlCode Enter the full path name to the file which includes the AmlCode
or dsdt_aml_code declaration. or dsdt_aml_code declaration.

View File

@ -36,11 +36,6 @@ static int acpi_ac_add(struct acpi_device *device);
static int acpi_ac_remove(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device);
static void acpi_ac_notify(struct acpi_device *device, u32 event); static void acpi_ac_notify(struct acpi_device *device, u32 event);
struct acpi_ac_bl {
const char *hid;
int hrv;
};
static const struct acpi_device_id ac_device_ids[] = { static const struct acpi_device_id ac_device_ids[] = {
{"ACPI0003", 0}, {"ACPI0003", 0},
{"", 0}, {"", 0},

View File

@ -21,6 +21,7 @@
static const struct acpi_device_id amba_id_list[] = { static const struct acpi_device_id amba_id_list[] = {
{"ARMH0061", 0}, /* PL061 GPIO Device */ {"ARMH0061", 0}, /* PL061 GPIO Device */
{"ARMH0330", 0}, /* ARM DMA Controller DMA-330 */
{"ARMHC500", 0}, /* ARM CoreSight ETM4x */ {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
{"ARMHC501", 0}, /* ARM CoreSight ETR */ {"ARMHC501", 0}, /* ARM CoreSight ETR */
{"ARMHC502", 0}, /* ARM CoreSight STM */ {"ARMHC502", 0}, /* ARM CoreSight STM */
@ -48,6 +49,7 @@ static void amba_register_dummy_clk(void)
static int amba_handler_attach(struct acpi_device *adev, static int amba_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id) const struct acpi_device_id *id)
{ {
struct acpi_device *parent = acpi_dev_parent(adev);
struct amba_device *dev; struct amba_device *dev;
struct resource_entry *rentry; struct resource_entry *rentry;
struct list_head resource_list; struct list_head resource_list;
@ -97,8 +99,8 @@ static int amba_handler_attach(struct acpi_device *adev,
* attached to it, that physical device should be the parent of * attached to it, that physical device should be the parent of
* the amba device we are about to create. * the amba device we are about to create.
*/ */
if (adev->parent) if (parent)
dev->dev.parent = acpi_get_first_physical_node(adev->parent); dev->dev.parent = acpi_get_first_physical_node(parent);
ACPI_COMPANION_SET(&dev->dev, adev); ACPI_COMPANION_SET(&dev->dev, adev);

View File

@ -60,12 +60,6 @@ static int acpi_apd_setup(struct apd_private_data *pdata)
} }
#ifdef CONFIG_X86_AMD_PLATFORM_DEVICE #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE
static int misc_check_res(struct acpi_resource *ares, void *data)
{
struct resource res;
return !acpi_dev_resource_memory(ares, &res);
}
static int fch_misc_setup(struct apd_private_data *pdata) static int fch_misc_setup(struct apd_private_data *pdata)
{ {
@ -82,8 +76,7 @@ static int fch_misc_setup(struct apd_private_data *pdata)
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&resource_list); INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, misc_check_res, ret = acpi_dev_get_memory_resources(adev, &resource_list);
NULL);
if (ret < 0) if (ret < 0)
return -ENOENT; return -ENOENT;

View File

@@ -12,6 +12,7 @@
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
 #include <linux/ras.h>
+#include <acpi/ghes.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>
@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
	int cpu = mce->extcpu;
	struct acpi_hest_generic_status *estatus, *tmp;
	struct acpi_hest_generic_data *gdata;
-	const guid_t *fru_id = &guid_null;
-	char *fru_text = "";
+	const guid_t *fru_id;
+	char *fru_text;
	guid_t *sec_type;
	static u32 err_seq;
@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
	/* log event via trace */
	err_seq++;
-	gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-	if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-		fru_id = (guid_t *)gdata->fru_id;
-	if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-		fru_text = gdata->fru_text;
-	sec_type = (guid_t *)gdata->section_type;
-	if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-		struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-		if (gdata->error_data_length >= sizeof(*mem))
-			trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-					       (u8)gdata->error_severity);
+	apei_estatus_for_each_section(tmp, gdata) {
+		if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+			fru_id = (guid_t *)gdata->fru_id;
+		else
+			fru_id = &guid_null;
+		if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+			fru_text = gdata->fru_text;
+		else
+			fru_text = "";
+		sec_type = (guid_t *)gdata->section_type;
+		if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+			struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+			if (gdata->error_data_length >= sizeof(*mem))
+				trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+						       (u8)gdata->error_severity);
+		}
	}
out:


@ -143,6 +143,23 @@ static const struct attribute_group boot_attr_group = {
static struct kobject *fpdt_kobj; static struct kobject *fpdt_kobj;
#if defined CONFIG_X86 && defined CONFIG_PHYS_ADDR_T_64BIT
#include <linux/processor.h>
static bool fpdt_address_valid(u64 address)
{
/*
* On some systems the table contains invalid addresses
* with unsuppored high address bits set, check for this.
*/
return !(address >> boot_cpu_data.x86_phys_bits);
}
#else
static bool fpdt_address_valid(u64 address)
{
return true;
}
#endif
static int fpdt_process_subtable(u64 address, u32 subtable_type) static int fpdt_process_subtable(u64 address, u32 subtable_type)
{ {
struct fpdt_subtable_header *subtable_header; struct fpdt_subtable_header *subtable_header;
@ -151,6 +168,11 @@ static int fpdt_process_subtable(u64 address, u32 subtable_type)
u32 length, offset; u32 length, offset;
int result; int result;
if (!fpdt_address_valid(address)) {
pr_info(FW_BUG "invalid physical address: 0x%llx!\n", address);
return -EINVAL;
}
subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header)); subtable_header = acpi_os_map_memory(address, sizeof(*subtable_header));
if (!subtable_header) if (!subtable_header)
return -ENOMEM; return -ENOMEM;


@@ -167,10 +167,10 @@ static struct pwm_lookup byt_pwm_lookup[] = {
 static void byt_pwm_setup(struct lpss_private_data *pdata)
 {
-	struct acpi_device *adev = pdata->adev;
+	u64 uid;
	/* Only call pwm_add_table for the first PWM controller */
-	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+	if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
		return;
	pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup));
@@ -180,14 +180,13 @@ static void byt_pwm_setup(struct lpss_private_data *pdata)
 static void byt_i2c_setup(struct lpss_private_data *pdata)
 {
-	const char *uid_str = acpi_device_uid(pdata->adev);
	acpi_handle handle = pdata->adev->handle;
	unsigned long long shared_host = 0;
	acpi_status status;
-	long uid = 0;
+	u64 uid;
-	/* Expected to always be true, but better safe then sorry */
-	if (uid_str && !kstrtol(uid_str, 10, &uid) && uid) {
+	/* Expected to always be successfull, but better safe then sorry */
+	if (!acpi_dev_uid_to_integer(pdata->adev, &uid) && uid) {
		/* Detect I2C bus shared with PUNIT and ignore its d3 status */
		status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
		if (ACPI_SUCCESS(status) && shared_host)
@@ -211,10 +210,10 @@ static struct pwm_lookup bsw_pwm_lookup[] = {
 static void bsw_pwm_setup(struct lpss_private_data *pdata)
 {
-	struct acpi_device *adev = pdata->adev;
+	u64 uid;
	/* Only call pwm_add_table for the first PWM controller */
-	if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1"))
+	if (acpi_dev_uid_to_integer(pdata->adev, &uid) || uid != 1)
		return;
	pwm_add_table(bsw_pwm_lookup, ARRAY_SIZE(bsw_pwm_lookup));
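The three hunks above replace open-coded _UID string checks with the acpi_dev_uid_to_integer() helper. As a rough, stand-alone sketch of that pattern (the function name and the expected UID value below are invented for illustration and are not part of the patch):

#include <linux/acpi.h>

static bool is_first_controller(struct acpi_device *adev)
{
	u64 uid;

	/* Returns 0 only when _UID exists and parses as an integer. */
	if (acpi_dev_uid_to_integer(adev, &uid))
		return false;

	return uid == 1;
}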
@ -392,13 +391,6 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
#ifdef CONFIG_X86_INTEL_LPSS #ifdef CONFIG_X86_INTEL_LPSS
static int is_memory(struct acpi_resource *res, void *not_used)
{
struct resource r;
return !acpi_dev_resource_memory(res, &r);
}
/* LPSS main clock device. */ /* LPSS main clock device. */
static struct platform_device *lpss_clk_dev; static struct platform_device *lpss_clk_dev;
@ -659,29 +651,25 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
return -ENOMEM; return -ENOMEM;
INIT_LIST_HEAD(&resource_list); INIT_LIST_HEAD(&resource_list);
ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL); ret = acpi_dev_get_memory_resources(adev, &resource_list);
if (ret < 0) if (ret < 0)
goto err_out; goto err_out;
list_for_each_entry(rentry, &resource_list, node) rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node);
if (resource_type(rentry->res) == IORESOURCE_MEM) { if (rentry) {
if (dev_desc->prv_size_override) if (dev_desc->prv_size_override)
pdata->mmio_size = dev_desc->prv_size_override; pdata->mmio_size = dev_desc->prv_size_override;
else else
pdata->mmio_size = resource_size(rentry->res); pdata->mmio_size = resource_size(rentry->res);
pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_base = ioremap(rentry->res->start, pdata->mmio_size);
pdata->mmio_size); }
break;
}
acpi_dev_free_resource_list(&resource_list); acpi_dev_free_resource_list(&resource_list);
if (!pdata->mmio_base) { if (!pdata->mmio_base) {
/* Avoid acpi_bus_attach() instantiating a pdev for this dev. */ /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
adev->pnp.type.platform_id = 0; adev->pnp.type.platform_id = 0;
/* Skip the device, but continue the namespace scan. */ goto out_free;
ret = 0;
goto err_out;
} }
pdata->adev = adev; pdata->adev = adev;
@ -692,11 +680,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
if (dev_desc->flags & LPSS_CLK) { if (dev_desc->flags & LPSS_CLK) {
ret = register_device_clock(adev, pdata); ret = register_device_clock(adev, pdata);
if (ret) { if (ret)
/* Skip the device, but continue the namespace scan. */ goto out_free;
ret = 0;
goto err_out;
}
} }
/* /*
@ -708,15 +693,19 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
adev->driver_data = pdata; adev->driver_data = pdata;
pdev = acpi_create_platform_device(adev, dev_desc->properties); pdev = acpi_create_platform_device(adev, dev_desc->properties);
if (!IS_ERR_OR_NULL(pdev)) { if (IS_ERR_OR_NULL(pdev)) {
acpi_lpss_create_device_links(adev, pdev); adev->driver_data = NULL;
return 1; ret = PTR_ERR(pdev);
goto err_out;
} }
ret = PTR_ERR(pdev); acpi_lpss_create_device_links(adev, pdev);
adev->driver_data = NULL; return 1;
err_out: out_free:
/* Skip the device, but continue the namespace scan */
ret = 0;
err_out:
kfree(pdata); kfree(pdata);
return ret; return ret;
} }


@ -23,6 +23,12 @@
#include <acpi/pcc.h> #include <acpi/pcc.h>
/*
* Arbitrary retries in case the remote processor is slow to respond
* to PCC commands
*/
#define PCC_CMD_WAIT_RETRIES_NUM 500ULL
struct pcc_data { struct pcc_data {
struct pcc_mbox_chan *pcc_chan; struct pcc_mbox_chan *pcc_chan;
void __iomem *pcc_comm_addr; void __iomem *pcc_comm_addr;
@ -63,6 +69,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (IS_ERR(data->pcc_chan)) { if (IS_ERR(data->pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n", pr_err("Failed to find PCC channel for subspace %d\n",
ctx->subspace_id); ctx->subspace_id);
kfree(data);
return AE_NOT_FOUND; return AE_NOT_FOUND;
} }
@ -72,6 +79,8 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
if (!data->pcc_comm_addr) { if (!data->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n", pr_err("Failed to ioremap PCC comm region mem for %d\n",
ctx->subspace_id); ctx->subspace_id);
pcc_mbox_free_channel(data->pcc_chan);
kfree(data);
return AE_NO_MEMORY; return AE_NO_MEMORY;
} }
@@ -86,6 +95,7 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
 {
	int ret;
	struct pcc_data *data = region_context;
+	u64 usecs_lat;
	reinit_completion(&data->done);
@@ -96,10 +106,22 @@ acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
	if (ret < 0)
		return AE_ERROR;
-	if (data->pcc_chan->mchan->mbox->txdone_irq)
-		wait_for_completion(&data->done);
+	if (data->pcc_chan->mchan->mbox->txdone_irq) {
+		/*
+		 * pcc_chan->latency is just a Nominal value. In reality the remote
+		 * processor could be much slower to reply. So add an arbitrary
+		 * amount of wait on top of Nominal.
+		 */
+		usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency;
+		ret = wait_for_completion_timeout(&data->done,
+						  usecs_to_jiffies(usecs_lat));
+		if (ret == 0) {
+			pr_err("PCC command executed timeout!\n");
+			return AE_TIME;
+		}
+	}
-	mbox_client_txdone(data->pcc_chan->mchan, ret);
+	mbox_chan_txdone(data->pcc_chan->mchan, ret);
	memcpy_fromio(value, data->pcc_comm_addr, data->ctx.length);
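The hunk above bounds what used to be an indefinite wait. A minimal sketch of the same bounded-wait idiom, assuming a driver-defined retry factor and completion (the helper name and the factor of 500 are hypothetical here):

#include <linux/completion.h>
#include <linux/jiffies.h>

/* Wait for a reply, but never longer than retries * nominal latency (us). */
static int wait_for_remote_reply(struct completion *done, u64 nominal_latency_us)
{
	u64 usecs = 500ULL * nominal_latency_us;	/* arbitrary retry factor */

	if (!wait_for_completion_timeout(done, usecs_to_jiffies(usecs)))
		return -ETIMEDOUT;

	return 0;
}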


@ -20,13 +20,13 @@
#include "internal.h" #include "internal.h"
static const struct acpi_device_id forbidden_id_list[] = { static const struct acpi_device_id forbidden_id_list[] = {
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
{"PNP0000", 0}, /* PIC */ {"PNP0000", 0}, /* PIC */
{"PNP0100", 0}, /* Timer */ {"PNP0100", 0}, /* Timer */
{"PNP0200", 0}, /* AT DMA Controller */ {"PNP0200", 0}, /* AT DMA Controller */
{"ACPI0009", 0}, /* IOxAPIC */
{"ACPI000A", 0}, /* IOAPIC */
{"SMB0001", 0}, /* ACPI SMBUS virtual device */ {"SMB0001", 0}, /* ACPI SMBUS virtual device */
{"", 0}, { }
}; };
static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev) static struct platform_device *acpi_platform_device_find_by_companion(struct acpi_device *adev)
@ -78,7 +78,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
* If the device has parent we need to take its resources into * If the device has parent we need to take its resources into
* account as well because this device might consume part of those. * account as well because this device might consume part of those.
*/ */
parent = acpi_get_first_physical_node(adev->parent); parent = acpi_get_first_physical_node(acpi_dev_parent(adev));
if (parent && dev_is_pci(parent)) if (parent && dev_is_pci(parent))
dest->parent = pci_find_resource(to_pci_dev(parent), dest); dest->parent = pci_find_resource(to_pci_dev(parent), dest);
} }
@ -97,6 +97,7 @@ static void acpi_platform_fill_resource(struct acpi_device *adev,
struct platform_device *acpi_create_platform_device(struct acpi_device *adev, struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
const struct property_entry *properties) const struct property_entry *properties)
{ {
struct acpi_device *parent = acpi_dev_parent(adev);
struct platform_device *pdev = NULL; struct platform_device *pdev = NULL;
struct platform_device_info pdevinfo; struct platform_device_info pdevinfo;
struct resource_entry *rentry; struct resource_entry *rentry;
@ -113,13 +114,11 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
INIT_LIST_HEAD(&resource_list); INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
if (count < 0) { if (count < 0)
return NULL; return NULL;
} else if (count > 0) { if (count > 0) {
resources = kcalloc(count, sizeof(struct resource), resources = kcalloc(count, sizeof(*resources), GFP_KERNEL);
GFP_KERNEL);
if (!resources) { if (!resources) {
dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list); acpi_dev_free_resource_list(&resource_list);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
@ -137,10 +136,9 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
* attached to it, that physical device should be the parent of the * attached to it, that physical device should be the parent of the
* platform device we are about to create. * platform device we are about to create.
*/ */
pdevinfo.parent = adev->parent ? pdevinfo.parent = parent ? acpi_get_first_physical_node(parent) : NULL;
acpi_get_first_physical_node(adev->parent) : NULL;
pdevinfo.name = dev_name(&adev->dev); pdevinfo.name = dev_name(&adev->dev);
pdevinfo.id = -1; pdevinfo.id = PLATFORM_DEVID_NONE;
pdevinfo.res = resources; pdevinfo.res = resources;
pdevinfo.num_res = count; pdevinfo.num_res = count;
pdevinfo.fwnode = acpi_fwnode_handle(adev); pdevinfo.fwnode = acpi_fwnode_handle(adev);


@ -47,9 +47,6 @@ module_param(brightness_switch_enabled, bool, 0644);
static bool allow_duplicates; static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644); module_param(allow_duplicates, bool, 0644);
static int disable_backlight_sysfs_if = -1;
module_param(disable_backlight_sysfs_if, int, 0444);
#define REPORT_OUTPUT_KEY_EVENTS 0x01 #define REPORT_OUTPUT_KEY_EVENTS 0x01
#define REPORT_BRIGHTNESS_KEY_EVENTS 0x02 #define REPORT_BRIGHTNESS_KEY_EVENTS 0x02
static int report_key_events = -1; static int report_key_events = -1;
@ -73,6 +70,16 @@ module_param(device_id_scheme, bool, 0444);
static int only_lcd = -1; static int only_lcd = -1;
module_param(only_lcd, int, 0444); module_param(only_lcd, int, 0444);
/*
* Display probing is known to take up to 5 seconds, so delay the fallback
* backlight registration by 5 seconds + 3 seconds for some extra margin.
*/
static int register_backlight_delay = 8;
module_param(register_backlight_delay, int, 0444);
MODULE_PARM_DESC(register_backlight_delay,
"Delay in seconds before doing fallback (non GPU driver triggered) "
"backlight registration, set to 0 to disable.");
static bool may_report_brightness_keys; static bool may_report_brightness_keys;
static int register_count; static int register_count;
static DEFINE_MUTEX(register_count_mutex); static DEFINE_MUTEX(register_count_mutex);
@ -81,7 +88,9 @@ static LIST_HEAD(video_bus_head);
static int acpi_video_bus_add(struct acpi_device *device); static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device); static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event); static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
void acpi_video_detect_exit(void); static void acpi_video_bus_register_backlight_work(struct work_struct *ignored);
static DECLARE_DELAYED_WORK(video_bus_register_backlight_work,
acpi_video_bus_register_backlight_work);
/* /*
* Indices in the _BCL method response: the first two items are special, * Indices in the _BCL method response: the first two items are special,
@ -382,14 +391,6 @@ static int video_set_bqc_offset(const struct dmi_system_id *d)
return 0; return 0;
} }
static int video_disable_backlight_sysfs_if(
const struct dmi_system_id *d)
{
if (disable_backlight_sysfs_if == -1)
disable_backlight_sysfs_if = 1;
return 0;
}
static int video_set_device_id_scheme(const struct dmi_system_id *d) static int video_set_device_id_scheme(const struct dmi_system_id *d)
{ {
device_id_scheme = true; device_id_scheme = true;
@ -462,40 +463,6 @@ static const struct dmi_system_id video_dmi_table[] = {
}, },
}, },
/*
* Some machines have a broken acpi-video interface for brightness
* control, but still need an acpi_video_device_lcd_set_level() call
* on resume to turn the backlight power on. We Enable backlight
* control on these systems, but do not register a backlight sysfs
* as brightness control does not work.
*/
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege R700",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
},
},
{
/* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Portege R830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
},
},
{
/* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
.callback = video_disable_backlight_sysfs_if,
.ident = "Toshiba Satellite R830",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
},
},
/* /*
* Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
* but the IDs actually follow the Device ID Scheme. * but the IDs actually follow the Device ID Scheme.
@ -1758,9 +1725,6 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
if (result) if (result)
return; return;
if (disable_backlight_sysfs_if > 0)
return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count); name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name) if (!name)
return; return;
@ -1859,8 +1823,6 @@ static int acpi_video_bus_register_backlight(struct acpi_video_bus *video)
if (video->backlight_registered) if (video->backlight_registered)
return 0; return 0;
acpi_video_run_bcl_for_osi(video);
if (acpi_video_get_backlight_type() != acpi_backlight_video) if (acpi_video_get_backlight_type() != acpi_backlight_video)
return 0; return 0;
@ -2030,7 +1992,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
acpi_status status; acpi_status status;
status = acpi_walk_namespace(ACPI_TYPE_DEVICE, status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
device->parent->handle, 1, acpi_dev_parent(device)->handle, 1,
acpi_video_bus_match, NULL, acpi_video_bus_match, NULL,
device, NULL); device, NULL);
if (status == AE_ALREADY_EXISTS) { if (status == AE_ALREADY_EXISTS) {
@ -2086,7 +2048,11 @@ static int acpi_video_bus_add(struct acpi_device *device)
list_add_tail(&video->entry, &video_bus_head); list_add_tail(&video->entry, &video_bus_head);
mutex_unlock(&video_list_lock); mutex_unlock(&video_list_lock);
acpi_video_bus_register_backlight(video); /*
* The userspace visible backlight_device gets registered separately
* from acpi_video_register_backlight().
*/
acpi_video_run_bcl_for_osi(video);
acpi_video_bus_add_notify_handler(video); acpi_video_bus_add_notify_handler(video);
return 0; return 0;
@ -2111,20 +2077,25 @@ static int acpi_video_bus_remove(struct acpi_device *device)
video = acpi_driver_data(device); video = acpi_driver_data(device);
acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
mutex_lock(&video_list_lock); mutex_lock(&video_list_lock);
list_del(&video->entry); list_del(&video->entry);
mutex_unlock(&video_list_lock); mutex_unlock(&video_list_lock);
acpi_video_bus_remove_notify_handler(video);
acpi_video_bus_unregister_backlight(video);
acpi_video_bus_put_devices(video);
kfree(video->attached_array); kfree(video->attached_array);
kfree(video); kfree(video);
return 0; return 0;
} }
static void acpi_video_bus_register_backlight_work(struct work_struct *ignored)
{
acpi_video_register_backlight();
}
static int __init is_i740(struct pci_dev *dev) static int __init is_i740(struct pci_dev *dev)
{ {
if (dev->device == 0x00D1) if (dev->device == 0x00D1)
@ -2235,6 +2206,18 @@ int acpi_video_register(void)
*/ */
register_count = 1; register_count = 1;
/*
* acpi_video_bus_add() skips registering the userspace visible
* backlight_device. The intend is for this to be registered by the
* drm/kms driver calling acpi_video_register_backlight() *after* it is
* done setting up its own native backlight device. The delayed work
* ensures that acpi_video_register_backlight() always gets called
* eventually, in case there is no drm/kms driver or it is disabled.
*/
if (register_backlight_delay)
schedule_delayed_work(&video_bus_register_backlight_work,
register_backlight_delay * HZ);
leave: leave:
mutex_unlock(&register_count_mutex); mutex_unlock(&register_count_mutex);
return ret; return ret;
@ -2245,6 +2228,7 @@ void acpi_video_unregister(void)
{ {
mutex_lock(&register_count_mutex); mutex_lock(&register_count_mutex);
if (register_count) { if (register_count) {
cancel_delayed_work_sync(&video_bus_register_backlight_work);
acpi_bus_unregister_driver(&acpi_video_bus); acpi_bus_unregister_driver(&acpi_video_bus);
register_count = 0; register_count = 0;
may_report_brightness_keys = false; may_report_brightness_keys = false;
@ -2253,19 +2237,16 @@ void acpi_video_unregister(void)
} }
EXPORT_SYMBOL(acpi_video_unregister); EXPORT_SYMBOL(acpi_video_unregister);
void acpi_video_unregister_backlight(void) void acpi_video_register_backlight(void)
{ {
struct acpi_video_bus *video; struct acpi_video_bus *video;
mutex_lock(&register_count_mutex); mutex_lock(&video_list_lock);
if (register_count) { list_for_each_entry(video, &video_bus_head, entry)
mutex_lock(&video_list_lock); acpi_video_bus_register_backlight(video);
list_for_each_entry(video, &video_bus_head, entry) mutex_unlock(&video_list_lock);
acpi_video_bus_unregister_backlight(video);
mutex_unlock(&video_list_lock);
}
mutex_unlock(&register_count_mutex);
} }
EXPORT_SYMBOL(acpi_video_register_backlight);
bool acpi_video_handles_brightness_key_presses(void) bool acpi_video_handles_brightness_key_presses(void)
{ {
@@ -2302,7 +2283,6 @@ static int __init acpi_video_init(void)
 static void __exit acpi_video_exit(void)
 {
-	acpi_video_detect_exit();
	acpi_video_unregister();
 }
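The new flow expects a GPU driver to call acpi_video_register_backlight() only after its native backlight device exists; the delayed work above is just the fallback. A hedged sketch of that driver side (the device, ops and function names are placeholders, not part of the patch):

#include <linux/backlight.h>
#include <acpi/video.h>

/* example_bl_ops is assumed to be provided elsewhere by the driver. */
static int example_register_native_backlight(struct device *dev, void *priv,
					     const struct backlight_ops *example_bl_ops)
{
	struct backlight_device *bd;

	bd = backlight_device_register("example_native_bl", dev, priv,
				       example_bl_ops, NULL);
	if (IS_ERR(bd))
		return PTR_ERR(bd);

	/* Only once the native device exists should the ACPI fallback be added. */
	acpi_video_register_backlight();
	return 0;
}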


@@ -125,12 +125,9 @@ EXPORT_SYMBOL_GPL(apei_exec_write_register);
 int apei_exec_write_register_value(struct apei_exec_context *ctx,
				    struct acpi_whea_header *entry)
 {
-	int rc;
	ctx->value = entry->value;
-	rc = apei_exec_write_register(ctx, entry);
-	return rc;
+	return apei_exec_write_register(ctx, entry);
 }
 EXPORT_SYMBOL_GPL(apei_exec_write_register_value);


@@ -90,6 +90,9 @@ static void __init bert_print_all(struct acpi_bert_region *region,
	if (skipped)
		pr_info(HW_ERR "Skipped %d error records\n", skipped);
+	if (printed + skipped)
+		pr_info("Total records found: %d\n", printed + skipped);
 }
static int __init setup_bert_disable(char *str) static int __init setup_bert_disable(char *str)


@@ -1020,14 +1020,10 @@ static int reader_pos;
 static int erst_open_pstore(struct pstore_info *psi)
 {
-	int rc;
	if (erst_disable)
		return -ENODEV;
-	rc = erst_get_record_id_begin(&reader_pos);
-	return rc;
+	return erst_get_record_id_begin(&reader_pos);
 }
 static int erst_close_pstore(struct pstore_info *psi)


@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
	clear_fixmap(fixmap_idx);
 }
-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
 {
	unsigned long addr, len;
	int rc;
@@ -985,7 +985,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
		ghes_estatus_cache_add(generic, estatus);
	}
-	if (task_work_pending && current->mm != &init_mm) {
+	if (task_work_pending && current->mm) {
		estatus_node->task_work.func = ghes_kick_task_work;
		estatus_node->task_work_cpu = smp_processor_id();
		ret = task_work_add(current, &estatus_node->task_work,


@@ -4,11 +4,12 @@
 #include <linux/device.h>
 #include <linux/dma-direct.h>
-void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
+void acpi_arch_dma_setup(struct device *dev)
 {
	int ret;
	u64 end, mask;
-	u64 dmaaddr = 0, size = 0, offset = 0;
+	u64 size = 0;
+	const struct bus_dma_region *map = NULL;
	/*
	 * If @dev is expected to be DMA-capable then the bus code that created
@@ -26,7 +27,19 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
	else
		size = 1ULL << 32;
-	ret = acpi_dma_get_range(dev, &dmaaddr, &offset, &size);
+	ret = acpi_dma_get_range(dev, &map);
+	if (!ret && map) {
+		const struct bus_dma_region *r = map;
+		for (end = 0; r->size; r++) {
+			if (r->dma_start + r->size - 1 > end)
+				end = r->dma_start + r->size - 1;
+		}
+		size = end + 1;
+		dev->dma_range_map = map;
+	}
	if (ret == -ENODEV)
		ret = iort_dma_get_ranges(dev, &size);
	if (!ret) {
@@ -34,17 +47,10 @@ void acpi_arch_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size)
		 * Limit coherent and dma mask based on size retrieved from
		 * firmware.
		 */
-		end = dmaaddr + size - 1;
+		end = size - 1;
		mask = DMA_BIT_MASK(ilog2(end) + 1);
		dev->bus_dma_limit = end;
		dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
		*dev->dma_mask = min(*dev->dma_mask, mask);
	}
-	*dma_addr = dmaaddr;
-	*dma_size = size;
-	ret = dma_direct_set_offset(dev, dmaaddr + offset, dmaaddr, size);
-	dev_dbg(dev, "dma_offset(%#08llx)%s\n", offset, ret ? " failed!" : "");
 }


@@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev,
		struct iommu_resv_region *region;
		region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
-						 prot, IOMMU_RESV_MSI);
+						 prot, IOMMU_RESV_MSI,
+						 GFP_KERNEL);
		if (region)
			list_add_tail(&region->list, head);
	}


@ -456,7 +456,7 @@ static void acpi_bus_osc_negotiate_usb_control(void)
Notification Handling Notification Handling
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
/** /*
* acpi_bus_notify * acpi_bus_notify
* --------------- * ---------------
* Callback for all 'system-level' device notifications (values 0x00-0x7F). * Callback for all 'system-level' device notifications (values 0x00-0x7F).
@ -511,7 +511,7 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
break; break;
} }
adev = acpi_bus_get_acpi_device(handle); adev = acpi_get_acpi_dev(handle);
if (!adev) if (!adev)
goto err; goto err;
@ -524,14 +524,14 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
} }
if (!hotplug_event) { if (!hotplug_event) {
acpi_bus_put_acpi_device(adev); acpi_put_acpi_dev(adev);
return; return;
} }
if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type)))
return; return;
acpi_bus_put_acpi_device(adev); acpi_put_acpi_dev(adev);
err: err:
acpi_evaluate_ost(handle, type, ost_code, NULL); acpi_evaluate_ost(handle, type, ost_code, NULL);
@ -802,7 +802,7 @@ static bool acpi_of_modalias(struct acpi_device *adev,
str = obj->string.pointer; str = obj->string.pointer;
chr = strchr(str, ','); chr = strchr(str, ',');
strlcpy(modalias, chr ? chr + 1 : str, len); strscpy(modalias, chr ? chr + 1 : str, len);
return true; return true;
} }
@ -822,7 +822,7 @@ void acpi_set_modalias(struct acpi_device *adev, const char *default_id,
char *modalias, size_t len) char *modalias, size_t len)
{ {
if (!acpi_of_modalias(adev, modalias, len)) if (!acpi_of_modalias(adev, modalias, len))
strlcpy(modalias, default_id, len); strscpy(modalias, default_id, len);
} }
EXPORT_SYMBOL_GPL(acpi_set_modalias); EXPORT_SYMBOL_GPL(acpi_set_modalias);
@ -925,12 +925,13 @@ static const void *acpi_of_device_get_match_data(const struct device *dev)
const void *acpi_device_get_match_data(const struct device *dev) const void *acpi_device_get_match_data(const struct device *dev)
{ {
const struct acpi_device_id *acpi_ids = dev->driver->acpi_match_table;
const struct acpi_device_id *match; const struct acpi_device_id *match;
if (!dev->driver->acpi_match_table) if (!acpi_ids)
return acpi_of_device_get_match_data(dev); return acpi_of_device_get_match_data(dev);
match = acpi_match_device(dev->driver->acpi_match_table, dev); match = acpi_match_device(acpi_ids, dev);
if (!match) if (!match)
return NULL; return NULL;
@ -948,14 +949,13 @@ EXPORT_SYMBOL(acpi_match_device_ids);
bool acpi_driver_match_device(struct device *dev, bool acpi_driver_match_device(struct device *dev,
const struct device_driver *drv) const struct device_driver *drv)
{ {
if (!drv->acpi_match_table) const struct acpi_device_id *acpi_ids = drv->acpi_match_table;
return acpi_of_match_device(ACPI_COMPANION(dev), const struct of_device_id *of_ids = drv->of_match_table;
drv->of_match_table,
NULL);
return __acpi_match_device(acpi_companion_match(dev), if (!acpi_ids)
drv->acpi_match_table, drv->of_match_table, return acpi_of_match_device(ACPI_COMPANION(dev), of_ids, NULL);
NULL, NULL);
return __acpi_match_device(acpi_companion_match(dev), acpi_ids, of_ids, NULL, NULL);
} }
EXPORT_SYMBOL_GPL(acpi_driver_match_device); EXPORT_SYMBOL_GPL(acpi_driver_match_device);
@ -973,16 +973,13 @@ EXPORT_SYMBOL_GPL(acpi_driver_match_device);
*/ */
int acpi_bus_register_driver(struct acpi_driver *driver) int acpi_bus_register_driver(struct acpi_driver *driver)
{ {
int ret;
if (acpi_disabled) if (acpi_disabled)
return -ENODEV; return -ENODEV;
driver->drv.name = driver->name; driver->drv.name = driver->name;
driver->drv.bus = &acpi_bus_type; driver->drv.bus = &acpi_bus_type;
driver->drv.owner = driver->owner; driver->drv.owner = driver->owner;
ret = driver_register(&driver->drv); return driver_register(&driver->drv);
return ret;
} }
EXPORT_SYMBOL(acpi_bus_register_driver); EXPORT_SYMBOL(acpi_bus_register_driver);


@ -424,6 +424,9 @@ bool acpi_cpc_valid(void)
struct cpc_desc *cpc_ptr; struct cpc_desc *cpc_ptr;
int cpu; int cpu;
if (acpi_disabled)
return false;
for_each_present_cpu(cpu) { for_each_present_cpu(cpu) {
cpc_ptr = per_cpu(cpc_desc_ptr, cpu); cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
if (!cpc_ptr) if (!cpc_ptr)
@ -1240,6 +1243,48 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
} }
EXPORT_SYMBOL_GPL(cppc_get_perf_caps); EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
* cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
*
* CPPC has flexibility about how CPU performance counters are accessed.
* One of the choices is PCC regions, which can have a high access latency. This
* routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
*
* Return: true if any of the counters are in PCC regions, false otherwise
*/
bool cppc_perf_ctrs_in_pcc(void)
{
int cpu;
for_each_present_cpu(cpu) {
struct cpc_register_resource *ref_perf_reg;
struct cpc_desc *cpc_desc;
cpc_desc = per_cpu(cpc_desc_ptr, cpu);
if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
return true;
ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
/*
* If reference perf register is not supported then we should
* use the nominal perf value
*/
if (!CPC_SUPPORTED(ref_perf_reg))
ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
if (CPC_IN_PCC(ref_perf_reg))
return true;
}
return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);
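A possible caller-side use of the new helper, sketched with invented names, is to skip reading the feedback counters from latency-sensitive paths when they sit behind a slow PCC mailbox:

#include <acpi/cppc_acpi.h>

static int example_sample_delivered_perf(int cpu, u64 *delivered)
{
	struct cppc_perf_fb_ctrs fb_ctrs;
	int ret;

	/* Counters behind PCC can be slow to read; bail out of the fast path. */
	if (cppc_perf_ctrs_in_pcc())
		return -EBUSY;

	ret = cppc_get_perf_ctrs(cpu, &fb_ctrs);
	if (ret)
		return ret;

	*delivered = fb_ctrs.delivered;
	return 0;
}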
/** /**
* cppc_get_perf_ctrs - Read a CPU's performance feedback counters. * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
* @cpunum: CPU from which to read counters. * @cpunum: CPU from which to read counters.


@ -75,15 +75,17 @@ static int acpi_dev_pm_explicit_get(struct acpi_device *device, int *state)
int acpi_device_get_power(struct acpi_device *device, int *state) int acpi_device_get_power(struct acpi_device *device, int *state)
{ {
int result = ACPI_STATE_UNKNOWN; int result = ACPI_STATE_UNKNOWN;
struct acpi_device *parent;
int error; int error;
if (!device || !state) if (!device || !state)
return -EINVAL; return -EINVAL;
parent = acpi_dev_parent(device);
if (!device->flags.power_manageable) { if (!device->flags.power_manageable) {
/* TBD: Non-recursive algorithm for walking up hierarchy. */ /* TBD: Non-recursive algorithm for walking up hierarchy. */
*state = device->parent ? *state = parent ? parent->power.state : ACPI_STATE_D0;
device->parent->power.state : ACPI_STATE_D0;
goto out; goto out;
} }
@ -122,10 +124,10 @@ int acpi_device_get_power(struct acpi_device *device, int *state)
* point, the fact that the device is in D0 implies that the parent has * point, the fact that the device is in D0 implies that the parent has
* to be in D0 too, except if ignore_parent is set. * to be in D0 too, except if ignore_parent is set.
*/ */
if (!device->power.flags.ignore_parent && device->parent if (!device->power.flags.ignore_parent && parent &&
&& device->parent->power.state == ACPI_STATE_UNKNOWN parent->power.state == ACPI_STATE_UNKNOWN &&
&& result == ACPI_STATE_D0) result == ACPI_STATE_D0)
device->parent->power.state = ACPI_STATE_D0; parent->power.state = ACPI_STATE_D0;
*state = result; *state = result;
@ -191,13 +193,17 @@ int acpi_device_set_power(struct acpi_device *device, int state)
return -ENODEV; return -ENODEV;
} }
if (!device->power.flags.ignore_parent && device->parent && if (!device->power.flags.ignore_parent) {
state < device->parent->power.state) { struct acpi_device *parent;
acpi_handle_debug(device->handle,
"Cannot transition to %s for parent in %s\n", parent = acpi_dev_parent(device);
acpi_power_state_string(state), if (parent && state < parent->power.state) {
acpi_power_state_string(device->parent->power.state)); acpi_handle_debug(device->handle,
return -ENODEV; "Cannot transition to %s for parent in %s\n",
acpi_power_state_string(state),
acpi_power_state_string(parent->power.state));
return -ENODEV;
}
} }
/* /*
@ -497,7 +503,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
acpi_handle_debug(handle, "Wake notify\n"); acpi_handle_debug(handle, "Wake notify\n");
adev = acpi_bus_get_acpi_device(handle); adev = acpi_get_acpi_dev(handle);
if (!adev) if (!adev)
return; return;
@ -515,7 +521,7 @@ static void acpi_pm_notify_handler(acpi_handle handle, u32 val, void *not_used)
mutex_unlock(&acpi_pm_notifier_lock); mutex_unlock(&acpi_pm_notifier_lock);
acpi_bus_put_acpi_device(adev); acpi_put_acpi_dev(adev);
} }
/** /**
@ -681,7 +687,22 @@ static int acpi_dev_pm_get_state(struct device *dev, struct acpi_device *adev,
d_min = ret; d_min = ret;
wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid wakeup = device_may_wakeup(dev) && adev->wakeup.flags.valid
&& adev->wakeup.sleep_state >= target_state; && adev->wakeup.sleep_state >= target_state;
} else if (device_may_wakeup(dev) && dev->power.wakeirq) {
/*
* The ACPI subsystem doesn't manage the wake bit for IRQs
* defined with ExclusiveAndWake and SharedAndWake. Instead we
* expect them to be managed via the PM subsystem. Drivers
* should call dev_pm_set_wake_irq to register an IRQ as a wake
* source.
*
* If a device has a wake IRQ attached we need to check the
* _S0W method to get the correct wake D-state. Otherwise we
* end up putting the device into D3Cold which will more than
* likely disable wake functionality.
*/
wakeup = true;
} else { } else {
/* ACPI GPE is specified in _PRW. */
wakeup = adev->wakeup.flags.valid; wakeup = adev->wakeup.flags.valid;
} }
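The comment above assumes the driver has routed its wake interrupt through the PM core rather than ACPI. A minimal sketch of that registration (the function name is a placeholder, not part of the patch):

#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static int example_setup_wake_irq(struct device *dev, int irq)
{
	int ret;

	/* Mark the device wakeup-capable, then hand the IRQ to the PM core. */
	ret = device_init_wakeup(dev, true);
	if (ret)
		return ret;

	return dev_pm_set_wake_irq(dev, irq);
}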
@@ -1460,7 +1481,7 @@ EXPORT_SYMBOL_GPL(acpi_storage_d3);
 * not valid to ask for the ACPI power state of the device in that time frame.
 *
 * This function is intended to be used in a driver's probe or remove
- * function. See Documentation/firmware-guide/acpi/low-power-probe.rst for
+ * function. See Documentation/firmware-guide/acpi/non-d0-probe.rst for
 * more information.
 */
bool acpi_dev_state_d0(struct device *dev)
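Per the kernel-doc above, acpi_dev_state_d0() is meant for probe/remove paths. A hedged example of checking it during probe (driver specifics are hypothetical):

#include <linux/acpi.h>
#include <linux/device.h>

static int example_probe(struct device *dev)
{
	bool full_init_needed;

	/*
	 * If ACPI kept the device in D0 across probe, firmware-programmed
	 * state can be reused; otherwise the driver must reprogram it.
	 */
	full_init_needed = !acpi_dev_state_d0(dev);

	dev_dbg(dev, "full re-init %s\n", full_init_needed ? "needed" : "skipped");
	return 0;
}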


@@ -11,9 +11,6 @@ menuconfig ACPI_DPTF
	  a coordinated approach for different policies to effect the hardware
	  state of a system.
-	  For more information see:
-	  <https://01.org/intel%C2%AE-dynamic-platform-and-thermal-framework-dptf-chromium-os/overview>
 if ACPI_DPTF
 config DPTF_POWER


@@ -917,14 +917,10 @@ EXPORT_SYMBOL(ec_read);
 int ec_write(u8 addr, u8 val)
 {
-	int err;
	if (!first_ec)
		return -ENODEV;
-	err = acpi_ec_write(first_ec, addr, val);
-	return err;
+	return acpi_ec_write(first_ec, addr, val);
 }
 EXPORT_SYMBOL(ec_write);


@ -19,43 +19,12 @@
#include "fan.h" #include "fan.h"
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");
static int acpi_fan_probe(struct platform_device *pdev);
static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = { static const struct acpi_device_id fan_device_ids[] = {
ACPI_FAN_DEVICE_IDS, ACPI_FAN_DEVICE_IDS,
{"", 0}, {"", 0},
}; };
MODULE_DEVICE_TABLE(acpi, fan_device_ids); MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
static const struct dev_pm_ops acpi_fan_pm = {
.resume = acpi_fan_resume,
.freeze = acpi_fan_suspend,
.thaw = acpi_fan_resume,
.restore = acpi_fan_resume,
};
#define FAN_PM_OPS_PTR (&acpi_fan_pm)
#else
#define FAN_PM_OPS_PTR NULL
#endif
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
.remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
.pm = FAN_PM_OPS_PTR,
},
};
/* thermal cooling device callbacks */ /* thermal cooling device callbacks */
static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
*state) *state)
@ -459,6 +428,33 @@ static int acpi_fan_resume(struct device *dev)
return result; return result;
} }
static const struct dev_pm_ops acpi_fan_pm = {
.resume = acpi_fan_resume,
.freeze = acpi_fan_suspend,
.thaw = acpi_fan_resume,
.restore = acpi_fan_resume,
};
#define FAN_PM_OPS_PTR (&acpi_fan_pm)
#else
#define FAN_PM_OPS_PTR NULL
#endif #endif
static struct platform_driver acpi_fan_driver = {
.probe = acpi_fan_probe,
.remove = acpi_fan_remove,
.driver = {
.name = "acpi-fan",
.acpi_match_table = fan_device_ids,
.pm = FAN_PM_OPS_PTR,
},
};
module_platform_driver(acpi_fan_driver); module_platform_driver(acpi_fan_driver);
MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Fan Driver");
MODULE_LICENSE("GPL");


@@ -102,10 +102,10 @@ struct acpi_device_bus_id {
	struct list_head node;
 };
-int acpi_device_add(struct acpi_device *device,
-		    void (*release)(struct device *));
 void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
-			     int type);
+			     int type, void (*release)(struct device *));
+int acpi_tie_acpi_dev(struct acpi_device *adev);
+int acpi_device_add(struct acpi_device *device);
 int acpi_device_setup_files(struct acpi_device *dev);
 void acpi_device_remove_files(struct acpi_device *dev);
 void acpi_device_add_finalize(struct acpi_device *device);


@ -118,12 +118,12 @@ acpi_get_irq_source_fwhandle(const struct acpi_resource_source *source,
if (WARN_ON(ACPI_FAILURE(status))) if (WARN_ON(ACPI_FAILURE(status)))
return NULL; return NULL;
device = acpi_bus_get_acpi_device(handle); device = acpi_get_acpi_dev(handle);
if (WARN_ON(!device)) if (WARN_ON(!device))
return NULL; return NULL;
result = &device->fwnode; result = &device->fwnode;
acpi_bus_put_acpi_device(device); acpi_put_acpi_dev(device);
return result; return result;
} }
@ -147,6 +147,7 @@ struct acpi_irq_parse_one_ctx {
* @polarity: polarity attributes of hwirq * @polarity: polarity attributes of hwirq
* @polarity: polarity attributes of hwirq * @polarity: polarity attributes of hwirq
* @shareable: shareable attributes of hwirq * @shareable: shareable attributes of hwirq
* @wake_capable: wake capable attribute of hwirq
* @ctx: acpi_irq_parse_one_ctx updated by this function * @ctx: acpi_irq_parse_one_ctx updated by this function
* *
* Description: * Description:
@ -156,12 +157,13 @@ struct acpi_irq_parse_one_ctx {
static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode, static inline void acpi_irq_parse_one_match(struct fwnode_handle *fwnode,
u32 hwirq, u8 triggering, u32 hwirq, u8 triggering,
u8 polarity, u8 shareable, u8 polarity, u8 shareable,
u8 wake_capable,
struct acpi_irq_parse_one_ctx *ctx) struct acpi_irq_parse_one_ctx *ctx)
{ {
if (!fwnode) if (!fwnode)
return; return;
ctx->rc = 0; ctx->rc = 0;
*ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable); *ctx->res_flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
ctx->fwspec->fwnode = fwnode; ctx->fwspec->fwnode = fwnode;
ctx->fwspec->param[0] = hwirq; ctx->fwspec->param[0] = hwirq;
ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity); ctx->fwspec->param[1] = acpi_dev_get_irq_type(triggering, polarity);
@ -204,7 +206,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]); fwnode = acpi_get_gsi_domain_id(irq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index], acpi_irq_parse_one_match(fwnode, irq->interrupts[ctx->index],
irq->triggering, irq->polarity, irq->triggering, irq->polarity,
irq->shareable, ctx); irq->shareable, irq->wake_capable, ctx);
return AE_CTRL_TERMINATE; return AE_CTRL_TERMINATE;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
eirq = &ares->data.extended_irq; eirq = &ares->data.extended_irq;
@ -218,7 +220,7 @@ static acpi_status acpi_irq_parse_one_cb(struct acpi_resource *ares,
eirq->interrupts[ctx->index]); eirq->interrupts[ctx->index]);
acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index], acpi_irq_parse_one_match(fwnode, eirq->interrupts[ctx->index],
eirq->triggering, eirq->polarity, eirq->triggering, eirq->polarity,
eirq->shareable, ctx); eirq->shareable, eirq->wake_capable, ctx);
return AE_CTRL_TERMINATE; return AE_CTRL_TERMINATE;
} }


@ -9,7 +9,6 @@
*/ */
#define pr_fmt(fmt) "acpi/hmat: " fmt #define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/bitops.h> #include <linux/bitops.h>
@ -302,7 +301,7 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
u8 type, mem_hier; u8 type, mem_hier;
if (hmat_loc->header.length < sizeof(*hmat_loc)) { if (hmat_loc->header.length < sizeof(*hmat_loc)) {
pr_notice("HMAT: Unexpected locality header length: %u\n", pr_notice("Unexpected locality header length: %u\n",
hmat_loc->header.length); hmat_loc->header.length);
return -EINVAL; return -EINVAL;
} }
@ -314,12 +313,12 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds + total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
sizeof(*inits) * ipds + sizeof(*targs) * tpds; sizeof(*inits) * ipds + sizeof(*targs) * tpds;
if (hmat_loc->header.length < total_size) { if (hmat_loc->header.length < total_size) {
pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n", pr_notice("Unexpected locality header length:%u, minimum required:%u\n",
hmat_loc->header.length, total_size); hmat_loc->header.length, total_size);
return -EINVAL; return -EINVAL;
} }
pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n", pr_info("Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
hmat_loc->flags, hmat_data_type(type), ipds, tpds, hmat_loc->flags, hmat_data_type(type), ipds, tpds,
hmat_loc->entry_base_unit); hmat_loc->entry_base_unit);
@ -363,13 +362,13 @@ static __init int hmat_parse_cache(union acpi_subtable_headers *header,
u32 attrs; u32 attrs;
if (cache->header.length < sizeof(*cache)) { if (cache->header.length < sizeof(*cache)) {
pr_notice("HMAT: Unexpected cache header length: %u\n", pr_notice("Unexpected cache header length: %u\n",
cache->header.length); cache->header.length);
return -EINVAL; return -EINVAL;
} }
attrs = cache->cache_attributes; attrs = cache->cache_attributes;
pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n", pr_info("Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
cache->memory_PD, cache->cache_size, attrs, cache->memory_PD, cache->cache_size, attrs,
cache->number_of_SMBIOShandles); cache->number_of_SMBIOShandles);
@ -424,24 +423,24 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
struct memory_target *target = NULL; struct memory_target *target = NULL;
if (p->header.length != sizeof(*p)) { if (p->header.length != sizeof(*p)) {
pr_notice("HMAT: Unexpected address range header length: %u\n", pr_notice("Unexpected address range header length: %u\n",
p->header.length); p->header.length);
return -EINVAL; return -EINVAL;
} }
if (hmat_revision == 1) if (hmat_revision == 1)
pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n", pr_info("Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->reserved3, p->reserved4, p->flags, p->processor_PD, p->reserved3, p->reserved4, p->flags, p->processor_PD,
p->memory_PD); p->memory_PD);
else else
pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n", pr_info("Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
p->flags, p->processor_PD, p->memory_PD); p->flags, p->processor_PD, p->memory_PD);
if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) || if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
hmat_revision > 1) { hmat_revision > 1) {
target = find_mem_target(p->memory_PD); target = find_mem_target(p->memory_PD);
if (!target) { if (!target) {
pr_debug("HMAT: Memory Domain missing from SRAT\n"); pr_debug("Memory Domain missing from SRAT\n");
return -EINVAL; return -EINVAL;
} }
} }
@ -449,7 +448,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
int p_node = pxm_to_node(p->processor_PD); int p_node = pxm_to_node(p->processor_PD);
if (p_node == NUMA_NO_NODE) { if (p_node == NUMA_NO_NODE) {
pr_debug("HMAT: Invalid Processor Domain\n"); pr_debug("Invalid Processor Domain\n");
return -EINVAL; return -EINVAL;
} }
target->processor_pxm = p->processor_PD; target->processor_pxm = p->processor_PD;
@ -563,17 +562,26 @@ static int initiator_cmp(void *priv, const struct list_head *a,
{ {
struct memory_initiator *ia; struct memory_initiator *ia;
struct memory_initiator *ib; struct memory_initiator *ib;
unsigned long *p_nodes = priv;
ia = list_entry(a, struct memory_initiator, node); ia = list_entry(a, struct memory_initiator, node);
ib = list_entry(b, struct memory_initiator, node); ib = list_entry(b, struct memory_initiator, node);
set_bit(ia->processor_pxm, p_nodes);
set_bit(ib->processor_pxm, p_nodes);
return ia->processor_pxm - ib->processor_pxm; return ia->processor_pxm - ib->processor_pxm;
} }
static int initiators_to_nodemask(unsigned long *p_nodes)
{
struct memory_initiator *initiator;
if (list_empty(&initiators))
return -ENXIO;
list_for_each_entry(initiator, &initiators, node)
set_bit(initiator->processor_pxm, p_nodes);
return 0;
}
static void hmat_register_target_initiators(struct memory_target *target) static void hmat_register_target_initiators(struct memory_target *target)
{ {
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES); static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
@ -610,7 +618,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
* initiators. * initiators.
*/ */
bitmap_zero(p_nodes, MAX_NUMNODES); bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(p_nodes, &initiators, initiator_cmp); list_sort(NULL, &initiators, initiator_cmp);
if (initiators_to_nodemask(p_nodes) < 0)
return;
if (!access0done) { if (!access0done) {
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i]; loc = localities_types[i];
@ -644,8 +655,9 @@ static void hmat_register_target_initiators(struct memory_target *target)
/* Access 1 ignores Generic Initiators */ /* Access 1 ignores Generic Initiators */
bitmap_zero(p_nodes, MAX_NUMNODES); bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(p_nodes, &initiators, initiator_cmp); if (initiators_to_nodemask(p_nodes) < 0)
best = 0; return;
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) { for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i]; loc = localities_types[i];
if (!loc) if (!loc)
@ -840,7 +852,7 @@ static __init int hmat_init(void)
case 2: case 2:
break; break;
default: default:
pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision); pr_notice("Ignoring: Unknown revision:%d\n", hmat_revision);
goto out_put; goto out_put;
} }
@ -848,7 +860,7 @@ static __init int hmat_init(void)
if (acpi_table_parse_entries(ACPI_SIG_HMAT, if (acpi_table_parse_entries(ACPI_SIG_HMAT,
sizeof(struct acpi_table_hmat), i, sizeof(struct acpi_table_hmat), i,
hmat_parse_subtable, 0) < 0) { hmat_parse_subtable, 0) < 0) {
pr_notice("Ignoring HMAT: Invalid table"); pr_notice("Ignoring: Invalid table");
goto out_put; goto out_put;
} }
} }


@@ -327,6 +327,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
		pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
			node, start, end);
	}
+	node_set(node, numa_nodes_parsed);
	/* Set the next available fake_pxm value */
	(*fake_pxm)++;


@@ -44,30 +44,6 @@ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
/*
* Linux-Dell-Video is used by BIOS to disable RTD3 for NVidia graphics
* cards as RTD3 is not supported by drivers now. Systems with NVidia
* cards will hang without RTD3 disabled.
*
* Once NVidia drivers officially support RTD3, this _OSI strings can
* be removed if both new and old graphics cards are supported.
*/
{"Linux-Dell-Video", true},
/*
* Linux-Lenovo-NV-HDMI-Audio is used by BIOS to power on NVidia's HDMI
* audio device which is turned off for power-saving in Windows OS.
* This power management feature observed on some Lenovo Thinkpad
* systems which will not be able to output audio via HDMI without
* a BIOS workaround.
*/
{"Linux-Lenovo-NV-HDMI-Audio", true},
/*
* Linux-HPI-Hybrid-Graphics is used by BIOS to enable dGPU to
* output video directly to external monitors on HP Inc. mobile
* workstations as Nvidia and AMD VGA drivers provide limited
* hybrid graphics supports.
*/
{"Linux-HPI-Hybrid-Graphics", true},
};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)


@ -312,76 +312,26 @@ struct acpi_handle_node {
*/ */
struct pci_dev *acpi_get_pci_dev(acpi_handle handle) struct pci_dev *acpi_get_pci_dev(acpi_handle handle)
{ {
int dev, fn; struct acpi_device *adev = acpi_fetch_acpi_dev(handle);
unsigned long long adr; struct acpi_device_physical_node *pn;
acpi_status status; struct pci_dev *pci_dev = NULL;
acpi_handle phandle;
struct pci_bus *pbus;
struct pci_dev *pdev = NULL;
struct acpi_handle_node *node, *tmp;
struct acpi_pci_root *root;
LIST_HEAD(device_list);
/* if (!adev)
* Walk up the ACPI CA namespace until we reach a PCI root bridge. return NULL;
*/
phandle = handle;
while (!acpi_is_root_bridge(phandle)) {
node = kzalloc(sizeof(struct acpi_handle_node), GFP_KERNEL);
if (!node)
goto out;
INIT_LIST_HEAD(&node->node); mutex_lock(&adev->physical_node_lock);
node->handle = phandle;
list_add(&node->node, &device_list);
status = acpi_get_parent(phandle, &phandle); list_for_each_entry(pn, &adev->physical_node_list, node) {
if (ACPI_FAILURE(status)) if (dev_is_pci(pn->dev)) {
goto out; get_device(pn->dev);
} pci_dev = to_pci_dev(pn->dev);
root = acpi_pci_find_root(phandle);
if (!root)
goto out;
pbus = root->bus;
/*
* Now, walk back down the PCI device tree until we return to our
* original handle. Assumes that everything between the PCI root
* bridge and the device we're looking for must be a P2P bridge.
*/
list_for_each_entry(node, &device_list, node) {
acpi_handle hnd = node->handle;
status = acpi_evaluate_integer(hnd, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status))
goto out;
dev = (adr >> 16) & 0xffff;
fn = adr & 0xffff;
pdev = pci_get_slot(pbus, PCI_DEVFN(dev, fn));
if (!pdev || hnd == handle)
break;
pbus = pdev->subordinate;
pci_dev_put(pdev);
/*
* This function may be called for a non-PCI device that has a
* PCI parent (eg. a disk under a PCI SATA controller). In that
* case pdev->subordinate will be NULL for the parent.
*/
if (!pbus) {
dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
pdev = NULL;
break; break;
} }
} }
out:
list_for_each_entry_safe(node, tmp, &device_list, node)
kfree(node);
return pdev; mutex_unlock(&adev->physical_node_lock);
return pci_dev;
} }
EXPORT_SYMBOL_GPL(acpi_get_pci_dev); EXPORT_SYMBOL_GPL(acpi_get_pci_dev);


@@ -944,13 +944,15 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
		return NULL;
	device = &resource->device;
-	acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER);
+	acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
+				acpi_release_power_resource);
	mutex_init(&resource->resource_lock);
	INIT_LIST_HEAD(&resource->list_node);
	INIT_LIST_HEAD(&resource->dependents);
	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
	device->power.state = ACPI_STATE_UNKNOWN;
+	device->flags.match_driver = true;
	/* Evaluate the object to get the system level and resource order. */
	status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
@@ -967,8 +969,11 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
	pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
-	device->flags.match_driver = true;
-	result = acpi_device_add(device, acpi_release_power_resource);
+	result = acpi_tie_acpi_dev(device);
+	if (result)
+		goto err;
+	result = acpi_device_add(device);
	if (result)
		goto err;


@ -531,10 +531,27 @@ static void wait_for_freeze(void)
/* No delay is needed if we are in guest */ /* No delay is needed if we are in guest */
if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
return; return;
/*
* Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
* not this code. Assume that any Intel systems using this
* are ancient and may need the dummy wait. This also assumes
* that the motivating chipset issue was Intel-only.
*/
if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return;
#endif #endif
	/* Dummy wait op - must do something useless after P_LVL2 read
	   because chipsets cannot guarantee that STPCLK# signal
	   gets asserted in time to freeze execution properly. */

	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
inl(acpi_gbl_FADT.xpm_timer_block.address); inl(acpi_gbl_FADT.xpm_timer_block.address);
} }
@ -787,7 +804,7 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state = &drv->states[count]; state = &drv->states[count];
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i); snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
state->exit_latency = cx->latency; state->exit_latency = cx->latency;
state->target_residency = cx->latency * latency_factor; state->target_residency = cx->latency * latency_factor;
state->enter = acpi_idle_enter; state->enter = acpi_idle_enter;
@ -956,7 +973,7 @@ static int acpi_processor_evaluate_lpi(acpi_handle handle,
obj = pkg_elem + 9; obj = pkg_elem + 9;
if (obj->type == ACPI_TYPE_STRING) if (obj->type == ACPI_TYPE_STRING)
strlcpy(lpi_state->desc, obj->string.pointer, strscpy(lpi_state->desc, obj->string.pointer,
ACPI_CX_DESC_LEN); ACPI_CX_DESC_LEN);
lpi_state->index = state_idx; lpi_state->index = state_idx;
@ -1022,7 +1039,7 @@ static bool combine_lpi_states(struct acpi_lpi_state *local,
result->arch_flags = parent->arch_flags; result->arch_flags = parent->arch_flags;
result->index = parent->index; result->index = parent->index;
strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN); strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
strlcat(result->desc, "+", ACPI_CX_DESC_LEN); strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN); strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
return true; return true;
@ -1196,7 +1213,7 @@ static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
state = &drv->states[i]; state = &drv->states[i];
snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i); snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN); strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
state->exit_latency = lpi->wake_latency; state->exit_latency = lpi->wake_latency;
state->target_residency = lpi->min_residency; state->target_residency = lpi->min_residency;
if (lpi->arch_flags) if (lpi->arch_flags)
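
The strlcpy() to strscpy() conversions above are mechanical; a brief sketch of the behavioural difference (illustrative only, not from this commit): strscpy() always NUL-terminates and reports truncation, while strlcpy() returned strlen(src) and kept reading the source past the copy.

static void example_copy_desc(char *dst, size_t len, const char *src)
{
	/* strscpy() returns the number of bytes copied, or -E2BIG if the
	 * source had to be truncated to fit; the destination is always
	 * NUL-terminated. */
	if (strscpy(dst, src, len) == -E2BIG)
		pr_debug("description truncated\n");
}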

View File

@ -304,8 +304,10 @@ static void acpi_init_of_compatible(struct acpi_device *adev)
ret = acpi_dev_get_property(adev, "compatible", ret = acpi_dev_get_property(adev, "compatible",
ACPI_TYPE_STRING, &of_compatible); ACPI_TYPE_STRING, &of_compatible);
if (ret) { if (ret) {
if (adev->parent struct acpi_device *parent;
&& adev->parent->flags.of_compatible_ok)
parent = acpi_dev_parent(adev);
if (parent && parent->flags.of_compatible_ok)
goto out; goto out;
return; return;
@ -1267,10 +1269,11 @@ acpi_node_get_parent(const struct fwnode_handle *fwnode)
return to_acpi_data_node(fwnode)->parent; return to_acpi_data_node(fwnode)->parent;
} }
if (is_acpi_device_node(fwnode)) { if (is_acpi_device_node(fwnode)) {
struct device *dev = to_acpi_device_node(fwnode)->dev.parent; struct acpi_device *parent;
if (dev) parent = acpi_dev_parent(to_acpi_device_node(fwnode));
return acpi_fwnode_handle(to_acpi_device(dev)); if (parent)
return acpi_fwnode_handle(parent);
} }
return NULL; return NULL;

View File

@ -336,8 +336,9 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_ext_address_space);
* @triggering: Triggering type as provided by ACPI. * @triggering: Triggering type as provided by ACPI.
* @polarity: Interrupt polarity as provided by ACPI. * @polarity: Interrupt polarity as provided by ACPI.
* @shareable: Whether or not the interrupt is shareable. * @shareable: Whether or not the interrupt is shareable.
* @wake_capable: Wake capability as provided by ACPI.
*/ */
unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable) unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable, u8 wake_capable)
{ {
unsigned long flags; unsigned long flags;
@ -351,6 +352,9 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable)
if (shareable == ACPI_SHARED) if (shareable == ACPI_SHARED)
flags |= IORESOURCE_IRQ_SHAREABLE; flags |= IORESOURCE_IRQ_SHAREABLE;
if (wake_capable == ACPI_WAKE_CAPABLE)
flags |= IORESOURCE_IRQ_WAKECAPABLE;
return flags | IORESOURCE_IRQ; return flags | IORESOURCE_IRQ;
} }
EXPORT_SYMBOL_GPL(acpi_dev_irq_flags); EXPORT_SYMBOL_GPL(acpi_dev_irq_flags);
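
A hedged sketch of a call using the new wake_capable argument (hypothetical driver code, not part of this commit):

static unsigned long example_wake_irq_flags(void)
{
	/* Level-triggered, active-low, shareable and wake-capable: the result
	 * carries IORESOURCE_IRQ_WAKECAPABLE in addition to the usual bits. */
	return acpi_dev_irq_flags(ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW,
				  ACPI_SHARED, ACPI_WAKE_CAPABLE);
}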
@ -399,16 +403,63 @@ static const struct dmi_system_id medion_laptop[] = {
{ } { }
}; };
static const struct dmi_system_id asus_laptop[] = {
{
.ident = "Asus Vivobook K3402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3402ZA"),
},
},
{
.ident = "Asus Vivobook K3502ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "K3502ZA"),
},
},
{
.ident = "Asus Vivobook S5402ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
},
},
{
.ident = "Asus Vivobook S5602ZA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
},
},
{ }
};
static const struct dmi_system_id lenovo_82ra[] = {
{
.ident = "LENOVO IdeaPad Flex 5 16ALC7",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
},
},
{ }
};
struct irq_override_cmp { struct irq_override_cmp {
const struct dmi_system_id *system; const struct dmi_system_id *system;
unsigned char irq; unsigned char irq;
unsigned char triggering; unsigned char triggering;
unsigned char polarity; unsigned char polarity;
unsigned char shareable; unsigned char shareable;
bool override;
}; };
static const struct irq_override_cmp skip_override_table[] = { static const struct irq_override_cmp override_table[] = {
{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 }, { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
{ lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
{ lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
}; };
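
For context, a further board quirk would be one more DMI table plus an entry in override_table; the vendor, board name and GSI values below are purely illustrative, not from this commit:

static const struct dmi_system_id example_board[] = {
	{
		.ident = "Example Board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_BOARD_NAME, "EB1234"),
		},
	},
	{ }
};

/* ... added to override_table:
 *	{ example_board, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
 */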
static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity, static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@ -416,6 +467,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
{ {
int i; int i;
for (i = 0; i < ARRAY_SIZE(override_table); i++) {
const struct irq_override_cmp *entry = &override_table[i];
if (dmi_check_system(entry->system) &&
entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
entry->shareable == shareable)
return entry->override;
}
#ifdef CONFIG_X86 #ifdef CONFIG_X86
/* /*
* IRQ override isn't needed on modern AMD Zen systems and * IRQ override isn't needed on modern AMD Zen systems and
@ -426,23 +488,12 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
return false; return false;
#endif #endif
for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
const struct irq_override_cmp *entry = &skip_override_table[i];
if (dmi_check_system(entry->system) &&
entry->irq == gsi &&
entry->triggering == triggering &&
entry->polarity == polarity &&
entry->shareable == shareable)
return false;
}
return true; return true;
} }
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi, static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable, u8 triggering, u8 polarity, u8 shareable,
bool check_override) u8 wake_capable, bool check_override)
{ {
int irq, p, t; int irq, p, t;
@ -468,14 +519,17 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH; u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
if (triggering != trig || polarity != pol) { if (triggering != trig || polarity != pol) {
pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi, pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
t ? "level" : "edge", p ? "low" : "high"); t ? "level" : "edge",
trig == triggering ? "" : "(!)",
p ? "low" : "high",
pol == polarity ? "" : "(!)");
triggering = trig; triggering = trig;
polarity = pol; polarity = pol;
} }
} }
res->flags = acpi_dev_irq_flags(triggering, polarity, shareable); res->flags = acpi_dev_irq_flags(triggering, polarity, shareable, wake_capable);
irq = acpi_register_gsi(NULL, gsi, triggering, polarity); irq = acpi_register_gsi(NULL, gsi, triggering, polarity);
if (irq >= 0) { if (irq >= 0) {
res->start = irq; res->start = irq;
@ -523,7 +577,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
} }
acpi_dev_get_irqresource(res, irq->interrupts[index], acpi_dev_get_irqresource(res, irq->interrupts[index],
irq->triggering, irq->polarity, irq->triggering, irq->polarity,
irq->shareable, true); irq->shareable, irq->wake_capable,
true);
break; break;
case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
ext_irq = &ares->data.extended_irq; ext_irq = &ares->data.extended_irq;
@ -534,7 +589,8 @@ bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index,
if (is_gsi(ext_irq)) if (is_gsi(ext_irq))
acpi_dev_get_irqresource(res, ext_irq->interrupts[index], acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
ext_irq->triggering, ext_irq->polarity, ext_irq->triggering, ext_irq->polarity,
ext_irq->shareable, false); ext_irq->shareable, ext_irq->wake_capable,
false);
else else
irqresource_disabled(res, 0); irqresource_disabled(res, 0);
break; break;
@ -690,6 +746,9 @@ static int is_memory(struct acpi_resource *ares, void *not_used)
memset(&win, 0, sizeof(win)); memset(&win, 0, sizeof(win));
if (acpi_dev_filter_resource_type(ares, IORESOURCE_MEM))
return 1;
return !(acpi_dev_resource_memory(ares, res) return !(acpi_dev_resource_memory(ares, res)
|| acpi_dev_resource_address_space(ares, &win) || acpi_dev_resource_address_space(ares, &win)
|| acpi_dev_resource_ext_address_space(ares, &win)); || acpi_dev_resource_ext_address_space(ares, &win));
@ -718,6 +777,23 @@ int acpi_dev_get_dma_resources(struct acpi_device *adev, struct list_head *list)
} }
EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources); EXPORT_SYMBOL_GPL(acpi_dev_get_dma_resources);
/**
* acpi_dev_get_memory_resources - Get current memory resources of a device.
* @adev: ACPI device node to get the resources for.
* @list: Head of the resultant list of resources (must be empty).
*
* This is a helper function that locates all memory type resources of @adev
* with acpi_dev_get_resources().
*
* The number of resources in the output list is returned on success, an error
* code reflecting the error condition is returned otherwise.
*/
int acpi_dev_get_memory_resources(struct acpi_device *adev, struct list_head *list)
{
return acpi_dev_get_resources(adev, list, is_memory, NULL);
}
EXPORT_SYMBOL_GPL(acpi_dev_get_memory_resources);
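
A usage sketch for the new helper (hypothetical caller, not part of this commit); the list must be released with acpi_dev_free_resource_list() afterwards:

static int example_count_mem(struct acpi_device *adev)
{
	struct resource_entry *rentry;
	LIST_HEAD(list);
	int ret, count = 0;

	ret = acpi_dev_get_memory_resources(adev, &list);
	if (ret < 0)
		return ret;

	list_for_each_entry(rentry, &list, node)
		count++;	/* inspect rentry->res as needed */

	acpi_dev_free_resource_list(&list);
	return count;
}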
/** /**
* acpi_dev_filter_resource_type - Filter ACPI resource according to resource * acpi_dev_filter_resource_type - Filter ACPI resource according to resource
* types * types

View File

@ -632,7 +632,7 @@ static int acpi_sbs_add(struct acpi_device *device)
mutex_init(&sbs->lock); mutex_init(&sbs->lock);
sbs->hc = acpi_driver_data(device->parent); sbs->hc = acpi_driver_data(acpi_dev_parent(device));
sbs->device = device; sbs->device = device;
strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME); strcpy(acpi_device_name(device), ACPI_SBS_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_SBS_CLASS); strcpy(acpi_device_class(device), ACPI_SBS_CLASS);

View File

@ -266,7 +266,7 @@ static int acpi_smbus_hc_add(struct acpi_device *device)
mutex_init(&hc->lock); mutex_init(&hc->lock);
init_waitqueue_head(&hc->wait); init_waitqueue_head(&hc->wait);
hc->ec = acpi_driver_data(device->parent); hc->ec = acpi_driver_data(acpi_dev_parent(device));
hc->offset = (val >> 8) & 0xff; hc->offset = (val >> 8) & 0xff;
hc->query_bit = val & 0xff; hc->query_bit = val & 0xff;
device->driver_data = hc; device->driver_data = hc;

View File

@ -20,6 +20,7 @@
#include <linux/platform_data/x86/apple.h> #include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h> #include <linux/pgtable.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/dma-direct.h>
#include "internal.h" #include "internal.h"
@ -29,8 +30,6 @@ extern struct acpi_device *acpi_root;
#define ACPI_BUS_HID "LNXSYBUS" #define ACPI_BUS_HID "LNXSYBUS"
#define ACPI_BUS_DEVICE_NAME "System Bus" #define ACPI_BUS_DEVICE_NAME "System Bus"
#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page) #define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
static const char *dummy_hid = "device"; static const char *dummy_hid = "device";
@ -429,7 +428,7 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src)
acpi_evaluate_ost(adev->handle, src, ost_code, NULL); acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
out: out:
acpi_bus_put_acpi_device(adev); acpi_put_acpi_dev(adev);
mutex_unlock(&acpi_scan_lock); mutex_unlock(&acpi_scan_lock);
unlock_device_hotplug(); unlock_device_hotplug();
} }
@ -599,11 +598,22 @@ static void get_acpi_device(void *dev)
acpi_dev_get(dev); acpi_dev_get(dev);
} }
struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle) /**
* acpi_get_acpi_dev - Retrieve ACPI device object and reference count it.
* @handle: ACPI handle associated with the requested ACPI device object.
*
* Return a pointer to the ACPI device object associated with @handle and bump
* up that object's reference counter (under the ACPI Namespace lock), if
* present, or return NULL otherwise.
*
* The ACPI device object reference acquired by this function needs to be
* dropped via acpi_dev_put().
*/
struct acpi_device *acpi_get_acpi_dev(acpi_handle handle)
{ {
return handle_to_device(handle, get_acpi_device); return handle_to_device(handle, get_acpi_device);
} }
EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device); EXPORT_SYMBOL_GPL(acpi_get_acpi_dev);
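
Typical use of the renamed helper (a sketch, not from this commit); the reference it takes must be dropped with acpi_dev_put():

static void example_with_adev(acpi_handle handle)
{
	struct acpi_device *adev = acpi_get_acpi_dev(handle);

	if (!adev)
		return;

	/* ... use adev ... */

	acpi_dev_put(adev);	/* drop the reference taken above */
}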
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id) static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{ {
@ -632,7 +642,7 @@ static int acpi_device_set_name(struct acpi_device *device,
return 0; return 0;
} }
static int acpi_tie_acpi_dev(struct acpi_device *adev) int acpi_tie_acpi_dev(struct acpi_device *adev)
{ {
acpi_handle handle = adev->handle; acpi_handle handle = adev->handle;
acpi_status status; acpi_status status;
@ -662,8 +672,7 @@ static void acpi_store_pld_crc(struct acpi_device *adev)
ACPI_FREE(pld); ACPI_FREE(pld);
} }
static int __acpi_device_add(struct acpi_device *device, int acpi_device_add(struct acpi_device *device)
void (*release)(struct device *))
{ {
struct acpi_device_bus_id *acpi_device_bus_id; struct acpi_device_bus_id *acpi_device_bus_id;
int result; int result;
@ -719,11 +728,6 @@ static int __acpi_device_add(struct acpi_device *device,
mutex_unlock(&acpi_device_lock); mutex_unlock(&acpi_device_lock);
if (device->parent)
device->dev.parent = &device->parent->dev;
device->dev.bus = &acpi_bus_type;
device->dev.release = release;
result = device_add(&device->dev); result = device_add(&device->dev);
if (result) { if (result) {
dev_err(&device->dev, "Error registering device\n"); dev_err(&device->dev, "Error registering device\n");
@ -750,17 +754,6 @@ static int __acpi_device_add(struct acpi_device *device,
return result; return result;
} }
int acpi_device_add(struct acpi_device *adev, void (*release)(struct device *))
{
int ret;
ret = acpi_tie_acpi_dev(adev);
if (ret)
return ret;
return __acpi_device_add(adev, release);
}
/* -------------------------------------------------------------------------- /* --------------------------------------------------------------------------
Device Enumeration Device Enumeration
-------------------------------------------------------------------------- */ -------------------------------------------------------------------------- */
@ -796,6 +789,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = { static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */ "PNP0D80", /* Windows-compatible System Power Management Controller */
"INT33BD", /* Intel Baytrail Mailbox Device */ "INT33BD", /* Intel Baytrail Mailbox Device */
"LATT2021", /* Lattice FW Update Client Driver */
NULL NULL
}; };
@ -805,10 +799,9 @@ static const char * const acpi_honor_dep_ids[] = {
NULL NULL
}; };
static struct acpi_device *acpi_bus_get_parent(acpi_handle handle) static struct acpi_device *acpi_find_parent_acpi_dev(acpi_handle handle)
{ {
struct acpi_device *device; struct acpi_device *adev;
acpi_status status;
/* /*
* Fixed hardware devices do not appear in the namespace and do not * Fixed hardware devices do not appear in the namespace and do not
@ -819,13 +812,18 @@ static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
return acpi_root; return acpi_root;
do { do {
		status = acpi_get_parent(handle, &handle);
		if (ACPI_FAILURE(status))
			return status == AE_NULL_ENTRY ? NULL : acpi_root;

		device = acpi_fetch_acpi_dev(handle);
	} while (!device);

	return device;

		acpi_status status;

		status = acpi_get_parent(handle, &handle);
		if (ACPI_FAILURE(status)) {
			if (status != AE_NULL_ENTRY)
				return acpi_root;

			return NULL;
		}

		adev = acpi_fetch_acpi_dev(handle);
	} while (!adev);

	return adev;
} }
acpi_status acpi_status
@ -1112,7 +1110,7 @@ static void acpi_device_get_busid(struct acpi_device *device)
* The device's Bus ID is simply the object name. * The device's Bus ID is simply the object name.
* TBD: Shouldn't this value be unique (within the ACPI namespace)? * TBD: Shouldn't this value be unique (within the ACPI namespace)?
*/ */
if (ACPI_IS_ROOT_DEVICE(device)) { if (!acpi_dev_parent(device)) {
strcpy(device->pnp.bus_id, "ACPI"); strcpy(device->pnp.bus_id, "ACPI");
return; return;
} }
@ -1467,25 +1465,21 @@ enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
* acpi_dma_get_range() - Get device DMA parameters. * acpi_dma_get_range() - Get device DMA parameters.
* *
* @dev: device to configure * @dev: device to configure
* @dma_addr: pointer device DMA address result * @map: pointer to DMA ranges result
* @offset: pointer to the DMA offset result
* @size: pointer to DMA range size result
* *
* Evaluate DMA regions and return respectively DMA region start, offset * Evaluate DMA regions and return pointer to DMA regions on
* and size in dma_addr, offset and size on parsing success; it does not * parsing success; it does not update the passed in values on failure.
* update the passed in values on failure.
* *
* Return 0 on success, < 0 on failure. * Return 0 on success, < 0 on failure.
*/ */
int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset, int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
u64 *size)
{ {
struct acpi_device *adev; struct acpi_device *adev;
LIST_HEAD(list); LIST_HEAD(list);
struct resource_entry *rentry; struct resource_entry *rentry;
int ret; int ret;
struct device *dma_dev = dev; struct device *dma_dev = dev;
u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0; struct bus_dma_region *r;
/* /*
* Walk the device tree chasing an ACPI companion with a _DMA * Walk the device tree chasing an ACPI companion with a _DMA
@ -1510,31 +1504,29 @@ int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
ret = acpi_dev_get_dma_resources(adev, &list); ret = acpi_dev_get_dma_resources(adev, &list);
if (ret > 0) { if (ret > 0) {
		list_for_each_entry(rentry, &list, node) {
			if (dma_offset && rentry->offset != dma_offset) {
				ret = -EINVAL;
				dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
				goto out;
			}
			dma_offset = rentry->offset;

			/* Take lower and upper limits */
			if (rentry->res->start < dma_start)
				dma_start = rentry->res->start;
			if (rentry->res->end > dma_end)
				dma_end = rentry->res->end;
		}

		if (dma_start >= dma_end) {
			ret = -EINVAL;
			dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
			goto out;
		}

		*dma_addr = dma_start - dma_offset;
		len = dma_end - dma_start;
		*size = max(len, len + 1);
		*offset = dma_offset;
	}

		r = kcalloc(ret + 1, sizeof(*r), GFP_KERNEL);
		if (!r) {
			ret = -ENOMEM;
			goto out;
		}

		*map = r;

		list_for_each_entry(rentry, &list, node) {
			if (rentry->res->start >= rentry->res->end) {
				kfree(*map);
				*map = NULL;
				ret = -EINVAL;
				dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
				goto out;
			}

			r->cpu_start = rentry->res->start;
			r->dma_start = rentry->res->start - rentry->offset;
			r->size = resource_size(rentry->res);
			r->offset = rentry->offset;
			r++;
		}
	}
out: out:
acpi_dev_free_resource_list(&list); acpi_dev_free_resource_list(&list);
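
A hedged sketch of how a caller consumes the map built above (hypothetical, modelled on the dma-direct bus_dma_region usage; not part of this hunk):

static void example_setup_dma_map(struct device *dev)
{
	const struct bus_dma_region *map = NULL;

	/* On success the kcalloc()'d array becomes the device's DMA range
	 * map and is owned by the device from here on. */
	if (!acpi_dma_get_range(dev, &map) && map)
		dev->dma_range_map = map;
}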
@ -1624,20 +1616,19 @@ int acpi_dma_configure_id(struct device *dev, enum dev_dma_attr attr,
const u32 *input_id) const u32 *input_id)
{ {
const struct iommu_ops *iommu; const struct iommu_ops *iommu;
u64 dma_addr = 0, size = 0;
if (attr == DEV_DMA_NOT_SUPPORTED) { if (attr == DEV_DMA_NOT_SUPPORTED) {
set_dma_ops(dev, &dma_dummy_ops); set_dma_ops(dev, &dma_dummy_ops);
return 0; return 0;
} }
acpi_arch_dma_setup(dev, &dma_addr, &size); acpi_arch_dma_setup(dev);
iommu = acpi_iommu_configure_id(dev, input_id); iommu = acpi_iommu_configure_id(dev, input_id);
if (PTR_ERR(iommu) == -EPROBE_DEFER) if (PTR_ERR(iommu) == -EPROBE_DEFER)
return -EPROBE_DEFER; return -EPROBE_DEFER;
arch_setup_dma_ops(dev, dma_addr, size, arch_setup_dma_ops(dev, 0, U64_MAX,
iommu, attr == DEV_DMA_COHERENT); iommu, attr == DEV_DMA_COHERENT);
return 0; return 0;
@ -1648,7 +1639,7 @@ static void acpi_init_coherency(struct acpi_device *adev)
{ {
unsigned long long cca = 0; unsigned long long cca = 0;
acpi_status status; acpi_status status;
struct acpi_device *parent = adev->parent; struct acpi_device *parent = acpi_dev_parent(adev);
if (parent && parent->flags.cca_seen) { if (parent && parent->flags.cca_seen) {
/* /*
@ -1692,7 +1683,7 @@ static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
static bool acpi_is_indirect_io_slave(struct acpi_device *device) static bool acpi_is_indirect_io_slave(struct acpi_device *device)
{ {
struct acpi_device *parent = device->parent; struct acpi_device *parent = acpi_dev_parent(device);
static const struct acpi_device_id indirect_io_hosts[] = { static const struct acpi_device_id indirect_io_hosts[] = {
{"HISI0191", 0}, {"HISI0191", 0},
{} {}
@ -1762,12 +1753,16 @@ static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
} }
void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
int type) int type, void (*release)(struct device *))
{ {
struct acpi_device *parent = acpi_find_parent_acpi_dev(handle);
INIT_LIST_HEAD(&device->pnp.ids); INIT_LIST_HEAD(&device->pnp.ids);
device->device_type = type; device->device_type = type;
device->handle = handle; device->handle = handle;
device->parent = acpi_bus_get_parent(handle); device->dev.parent = parent ? &parent->dev : NULL;
device->dev.release = release;
device->dev.bus = &acpi_bus_type;
fwnode_init(&device->fwnode, &acpi_device_fwnode_ops); fwnode_init(&device->fwnode, &acpi_device_fwnode_ops);
acpi_set_device_status(device, ACPI_STA_DEFAULT); acpi_set_device_status(device, ACPI_STA_DEFAULT);
acpi_device_get_busid(device); acpi_device_get_busid(device);
@ -1821,7 +1816,7 @@ static int acpi_add_single_object(struct acpi_device **child,
if (!device) if (!device)
return -ENOMEM; return -ENOMEM;
acpi_init_device_object(device, handle, type); acpi_init_device_object(device, handle, type, acpi_device_release);
/* /*
* Getting the status is delayed till here so that we can call * Getting the status is delayed till here so that we can call
* acpi_bus_get_status() and use its quirk handling. Note that * acpi_bus_get_status() and use its quirk handling. Note that
@ -1851,7 +1846,7 @@ static int acpi_add_single_object(struct acpi_device **child,
mutex_unlock(&acpi_dep_list_lock); mutex_unlock(&acpi_dep_list_lock);
if (!result) if (!result)
result = __acpi_device_add(device, acpi_device_release); result = acpi_device_add(device);
if (result) { if (result) {
acpi_device_release(&device->dev); acpi_device_release(&device->dev);
@ -1862,8 +1857,8 @@ static int acpi_add_single_object(struct acpi_device **child,
acpi_device_add_finalize(device); acpi_device_add_finalize(device);
acpi_handle_debug(handle, "Added as %s, parent %s\n", acpi_handle_debug(handle, "Added as %s, parent %s\n",
dev_name(&device->dev), device->parent ? dev_name(&device->dev), device->dev.parent ?
dev_name(&device->parent->dev) : "(null)"); dev_name(device->dev.parent) : "(null)");
*child = device; *child = device;
return 0; return 0;
@ -2235,11 +2230,24 @@ static int acpi_bus_attach(struct acpi_device *device, void *first_pass)
return 0; return 0;
} }
static int acpi_dev_get_first_consumer_dev_cb(struct acpi_dep_data *dep, void *data) static int acpi_dev_get_next_consumer_dev_cb(struct acpi_dep_data *dep, void *data)
{ {
	struct acpi_device *adev;

	adev = acpi_bus_get_acpi_device(dep->consumer);

	struct acpi_device **adev_p = data;
	struct acpi_device *adev = *adev_p;

	/*
	 * If we're passed a 'previous' consumer device then we need to skip
	 * any consumers until we meet the previous one, and then NULL @data
	 * so the next one can be returned.
	 */
	if (adev) {
		if (dep->consumer == adev->handle)
			*adev_p = NULL;

		return 0;
	}

	adev = acpi_get_acpi_dev(dep->consumer);
if (adev) { if (adev) {
*(struct acpi_device **)data = adev; *(struct acpi_device **)data = adev;
return 1; return 1;
@ -2292,7 +2300,7 @@ static bool acpi_scan_clear_dep_queue(struct acpi_device *adev)
static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data) static int acpi_scan_clear_dep(struct acpi_dep_data *dep, void *data)
{ {
struct acpi_device *adev = acpi_bus_get_acpi_device(dep->consumer); struct acpi_device *adev = acpi_get_acpi_dev(dep->consumer);
if (adev) { if (adev) {
adev->dep_unmet--; adev->dep_unmet--;
@ -2368,25 +2376,32 @@ bool acpi_dev_ready_for_enumeration(const struct acpi_device *device)
EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration); EXPORT_SYMBOL_GPL(acpi_dev_ready_for_enumeration);
/** /**
* acpi_dev_get_first_consumer_dev - Return ACPI device dependent on @supplier * acpi_dev_get_next_consumer_dev - Return the next adev dependent on @supplier
* @supplier: Pointer to the dependee device * @supplier: Pointer to the dependee device
* @start: Pointer to the current dependent device
* *
* Returns the first &struct acpi_device which declares itself dependent on * Returns the next &struct acpi_device which declares itself dependent on
* @supplier via the _DEP buffer, parsed from the acpi_dep_list. * @supplier via the _DEP buffer, parsed from the acpi_dep_list.
* *
* The caller is responsible for putting the reference to adev when it is no * If the returned adev is not passed as @start to this function, the caller is
* longer needed. * responsible for putting the reference to adev when it is no longer needed.
*/ */
struct acpi_device *acpi_dev_get_first_consumer_dev(struct acpi_device *supplier) struct acpi_device *acpi_dev_get_next_consumer_dev(struct acpi_device *supplier,
struct acpi_device *start)
{ {
struct acpi_device *adev = NULL; struct acpi_device *adev = start;
acpi_walk_dep_device_list(supplier->handle, acpi_walk_dep_device_list(supplier->handle,
acpi_dev_get_first_consumer_dev_cb, &adev); acpi_dev_get_next_consumer_dev_cb, &adev);
acpi_dev_put(start);
if (adev == start)
return NULL;
return adev; return adev;
} }
EXPORT_SYMBOL_GPL(acpi_dev_get_first_consumer_dev); EXPORT_SYMBOL_GPL(acpi_dev_get_next_consumer_dev);
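
An iteration sketch over all consumers of a supplier using the new interface (hypothetical caller, not part of this commit). Because each returned consumer is passed back in as @start, its reference is dropped by the next call, so only an early break out of the loop would need an explicit acpi_dev_put():

static void example_walk_consumers(struct acpi_device *supplier)
{
	struct acpi_device *consumer = NULL;

	while ((consumer = acpi_dev_get_next_consumer_dev(supplier, consumer)))
		dev_dbg(&consumer->dev, "depends on %s\n",
			dev_name(&supplier->dev));
}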
/** /**
* acpi_bus_scan - Add ACPI device node objects in a given namespace scope. * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.

View File

@ -1088,6 +1088,14 @@ int __init acpi_sleep_init(void)
register_sys_off_handler(SYS_OFF_MODE_POWER_OFF, register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
SYS_OFF_PRIO_FIRMWARE, SYS_OFF_PRIO_FIRMWARE,
acpi_power_off, NULL); acpi_power_off, NULL);
/*
* Windows uses S5 for reboot, so some BIOSes depend on it to
* perform proper reboot.
*/
register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
SYS_OFF_PRIO_FIRMWARE,
acpi_power_off_prepare, NULL);
} else { } else {
acpi_no_s5 = true; acpi_no_s5 = true;
} }

View File

@ -18,6 +18,7 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
extern int acpi_s2idle_begin(void); extern int acpi_s2idle_begin(void);
extern int acpi_s2idle_prepare(void); extern int acpi_s2idle_prepare(void);
extern int acpi_s2idle_prepare_late(void); extern int acpi_s2idle_prepare_late(void);
extern void acpi_s2idle_check(void);
extern bool acpi_s2idle_wake(void); extern bool acpi_s2idle_wake(void);
extern void acpi_s2idle_restore_early(void); extern void acpi_s2idle_restore_early(void);
extern void acpi_s2idle_restore(void); extern void acpi_s2idle_restore(void);

Some files were not shown because too many files have changed in this diff.