102-overlayfs_fix_readdir_deadlock.patch 3.3 KB

--- a/fs/overlayfs/overlayfs.c
+++ b/fs/overlayfs/overlayfs.c
@@ -248,8 +248,7 @@ static struct ovl_cache_entry *ovl_cache
 }
 
 static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
-						   u64 ino, unsigned int d_type,
-						   bool is_whiteout)
+						   u64 ino, unsigned int d_type)
 {
 	struct ovl_cache_entry *p;
 
@@ -262,7 +261,7 @@ static struct ovl_cache_entry *ovl_cache
 		p->len = len;
 		p->type = d_type;
 		p->ino = ino;
-		p->is_whiteout = is_whiteout;
+		p->is_whiteout = false;
 	}
 
 	return p;
@@ -270,7 +269,7 @@ static struct ovl_cache_entry *ovl_cache
 
 static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
 				  const char *name, int len, u64 ino,
-				  unsigned int d_type, bool is_whiteout)
+				  unsigned int d_type)
 {
 	struct rb_node **newp = &rdd->root->rb_node;
 	struct rb_node *parent = NULL;
@@ -291,11 +290,18 @@ static int ovl_cache_entry_add_rb(struct
 			return 0;
 	}
 
-	p = ovl_cache_entry_new(name, len, ino, d_type, is_whiteout);
+	p = ovl_cache_entry_new(name, len, ino, d_type);
 	if (p == NULL)
 		return -ENOMEM;
 
-	list_add_tail(&p->l_node, rdd->list);
+	/*
+	 * Add links before other types to be able to quickly mark
+	 * any whiteout entries
+	 */
+	if (d_type == DT_LNK)
+		list_add(&p->l_node, rdd->list);
+	else
+		list_add_tail(&p->l_node, rdd->list);
 	rb_link_node(&p->node, parent, newp);
 	rb_insert_color(&p->node, rdd->root);
 
@@ -313,7 +319,7 @@ static int ovl_fill_lower(void *buf, con
 	if (p) {
 		list_move_tail(&p->l_node, rdd->middle);
 	} else {
-		p = ovl_cache_entry_new(name, namelen, ino, d_type, false);
+		p = ovl_cache_entry_new(name, namelen, ino, d_type);
 		if (p == NULL)
 			rdd->err = -ENOMEM;
 		else
@@ -338,26 +344,9 @@ static int ovl_fill_upper(void *buf, con
 			  loff_t offset, u64 ino, unsigned int d_type)
 {
 	struct ovl_readdir_data *rdd = buf;
-	bool is_whiteout = false;
 
 	rdd->count++;
-	if (d_type == DT_LNK) {
-		struct dentry *dentry;
-
-		dentry = lookup_one_len(name, rdd->dir, namelen);
-		if (IS_ERR(dentry)) {
-			rdd->err = PTR_ERR(dentry);
-			goto out;
-		}
-		is_whiteout = ovl_is_whiteout(dentry);
-		dput(dentry);
-	}
-
-	rdd->err = ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type,
-					  is_whiteout);
-
-out:
-	return rdd->err;
+	return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
 }
 
 static int ovl_dir_read(struct path *realpath, struct ovl_readdir_data *rdd,
@@ -423,6 +412,26 @@ static void ovl_dir_reset(struct file *f
 	}
 }
 
+static void ovl_dir_mark_whiteouts(struct ovl_readdir_data *rdd)
+{
+	struct ovl_cache_entry *p;
+	struct dentry *dentry;
+
+	mutex_lock(&rdd->dir->d_inode->i_mutex);
+	list_for_each_entry(p, rdd->list, l_node) {
+		if (p->type != DT_LNK)
+			break;
+
+		dentry = lookup_one_len(p->name, rdd->dir, p->len);
+		if (IS_ERR(dentry))
+			continue;
+
+		p->is_whiteout = ovl_is_whiteout(dentry);
+		dput(dentry);
+	}
+	mutex_unlock(&rdd->dir->d_inode->i_mutex);
+}
+
 static int ovl_dir_read_merged(struct path *upperpath, struct path *lowerpath,
 			       struct ovl_readdir_data *rdd)
 {
@@ -436,6 +445,8 @@ static int ovl_dir_read_merged(struct pa
 		err = ovl_dir_read(upperpath, rdd, ovl_fill_upper);
 		if (err)
 			goto out;
+
+		ovl_dir_mark_whiteouts(rdd);
 	}
 	/*
 	 * Insert lowerpath entries before upperpath ones, this allows
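
For context on what the diff changes: the old ovl_fill_upper() called lookup_one_len() on every DT_LNK entry from inside the readdir callback, i.e. while the directory being read was still locked by the VFS, which is what deadlocked. The fix defers whiteout detection: the callback now only caches entries, and a second pass, ovl_dir_mark_whiteouts(), retakes i_mutex after the directory read has finished and resolves the candidates. To keep that pass cheap, DT_LNK entries (the only possible whiteouts in this overlayfs version) are prepended to the cache list, so the marking loop can stop at the first non-link entry. Below is a freestanding userspace sketch of that two-pass, candidates-as-list-prefix pattern; it is illustrative only (plain C with a toy linked list and a stubbed whiteout check, not the kernel code):

/*
 * Sketch of the pattern above: pass 1 collects entries without doing
 * any lookups, prepending "maybe whiteout" candidates (DT_LNK) so they
 * form a prefix of the list; pass 2 walks only that prefix.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { DT_REG_ = 8, DT_LNK_ = 10 };	/* local stand-ins for d_type values */

struct entry {
	char name[64];
	int type;
	bool is_whiteout;
	struct entry *next;
};

static struct entry *list;

/* Pass 1: called once per directory entry; no lookups happen here. */
static void cache_entry_add(const char *name, int type)
{
	struct entry *p = calloc(1, sizeof(*p));

	snprintf(p->name, sizeof(p->name), "%s", name);
	p->type = type;
	if (type == DT_LNK_) {		/* prepend candidates */
		p->next = list;
		list = p;
	} else {			/* append everything else */
		struct entry **pp = &list;

		while (*pp)
			pp = &(*pp)->next;
		*pp = p;
	}
}

/* Stub for ovl_is_whiteout(); the real check inspects the dentry. */
static bool is_whiteout(const struct entry *p)
{
	return strcmp(p->name, "wh.deleted") == 0;
}

/* Pass 2: candidates form a prefix, so stop at the first non-link. */
static void mark_whiteouts(void)
{
	for (struct entry *p = list; p && p->type == DT_LNK_; p = p->next)
		p->is_whiteout = is_whiteout(p);
}

int main(void)
{
	cache_entry_add("kept-file", DT_REG_);
	cache_entry_add("wh.deleted", DT_LNK_);
	cache_entry_add("a-symlink", DT_LNK_);
	mark_whiteouts();

	for (struct entry *p = list; p; p = p->next)
		printf("%-10s type=%-2d whiteout=%d\n",
		       p->name, p->type, p->is_whiteout);
	return 0;
}

Compile with e.g. cc -std=c99 sketch.c. The point is only the shape of the two passes: no lookups while the "directory read" is in progress, and a marking pass that touches just the candidate prefix, which is exactly what the list_add()/list_add_tail() split in the patch buys.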